From 41d09c7411b1474d2ef63542253a7b259faee335 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Wed, 4 Apr 2018 14:50:17 +0200
Subject: [PATCH 01/13] add test data
---
test/3C147-HI6.refmodel.lsm | 684 ++++++++++++++++++++++++++++++++++++
1 file changed, 684 insertions(+)
create mode 100644 test/3C147-HI6.refmodel.lsm
diff --git a/test/3C147-HI6.refmodel.lsm b/test/3C147-HI6.refmodel.lsm
new file mode 100644
index 0000000..c48959c
--- /dev/null
+++ b/test/3C147-HI6.refmodel.lsm
@@ -0,0 +1,684 @@
+
+Source list
+
+Plot styles
+
+
+Other properties
+Field centre ra: 1.4948845339 dec: 0.8700817014
+
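
The field centre in the test model above is stored in radians. A quick sanity check (an illustrative snippet, not part of the patch) converts it to sexagesimal and recovers the 3C147 position implied by the file name:

    import math
    ra, dec = 1.4948845339, 0.8700817014   # field centre from 3C147-HI6.refmodel.lsm, in radians
    ra_h = ra * 12 / math.pi                # radians -> hours
    dec_d = dec * 180 / math.pi             # radians -> degrees
    print("RA  %02dh%04.1fm" % (int(ra_h), (ra_h % 1) * 60))
    print("Dec %+03dd%04.1fm" % (int(dec_d), (abs(dec_d) % 1) * 60))
    # prints roughly 05h42.6m and +49d51.1m, i.e. the 3C147 field
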
From 3ffa8ba4ff46b684c21aa153f558e459cf782ce1 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Thu, 5 Apr 2018 16:22:02 +0200
Subject: [PATCH 02/13] ditch pyfits
---
Tigger/Coordinates.py | 3 ++-
Tigger/Models/Formats/AIPSCCFITS.py | 2 +-
Tigger/Tools/Imaging.py | 2 +-
Tigger/__init__.py | 17 ++---------------
Tigger/bin/tigger-convert | 5 ++---
Tigger/bin/tigger-make-brick | 2 +-
Tigger/bin/tigger-restore | 3 +--
Tigger/bin/tigger-tag | 3 +--
setup.py | 2 +-
9 files changed, 12 insertions(+), 27 deletions(-)
diff --git a/Tigger/Coordinates.py b/Tigger/Coordinates.py
index 1c9fe69..a792b0e 100644
--- a/Tigger/Coordinates.py
+++ b/Tigger/Coordinates.py
@@ -36,7 +36,8 @@
import Kittens.utils
-pyfits = Kittens.utils.import_pyfits();
+from astropy.io import fits as pyfits
+
startup_dprint(1,"imported pyfits");
DEG = math.pi/180;
diff --git a/Tigger/Models/Formats/AIPSCCFITS.py b/Tigger/Models/Formats/AIPSCCFITS.py
index 993c70e..704b39a 100644
--- a/Tigger/Models/Formats/AIPSCCFITS.py
+++ b/Tigger/Models/Formats/AIPSCCFITS.py
@@ -33,7 +33,7 @@
import re
import numpy
-import pyfits
+from astropy.io import fits as pyfits
import Kittens.utils
diff --git a/Tigger/Tools/Imaging.py b/Tigger/Tools/Imaging.py
index 0c47455..990c165 100644
--- a/Tigger/Tools/Imaging.py
+++ b/Tigger/Tools/Imaging.py
@@ -27,7 +27,7 @@
#
import Kittens.utils
-pyfits = Kittens.utils.import_pyfits();
+from astropy.io import fits as pyfits
import math
import numpy
diff --git a/Tigger/__init__.py b/Tigger/__init__.py
index f639fae..d4875dc 100644
--- a/Tigger/__init__.py
+++ b/Tigger/__init__.py
@@ -43,21 +43,8 @@
dprintf = _verbosity.dprintf
def import_pyfits ():
- """Helper function to import pyfits and return it. Provides a workaround for
- pyfits-2.3, which is actually arrogant enough (fuck you with a bargepole, pyfits!)
- to replace the standard warnings.formatwarning function with its own BROKEN version,
- thus breaking all other code that uses the warnings module."""
- if 'pyfits' not in sys.modules:
- import pyfits
- import warnings
- if getattr(pyfits,'formatwarning',None) is warnings.formatwarning:
- def why_is_pyfits_overriding_warnings_formatwarning_with_a_broken_one_damn_you_pyfits (message,category, filename,lineno,line=None):
- return str(message)+'\n'
- warnings.formatwarning = why_is_pyfits_overriding_warnings_formatwarning_with_a_broken_one_damn_you_pyfits
- if getattr(pyfits,'showwarning',None) is warnings.showwarning:
- def showwarning_damn_you_pyfits_damn_you_sincerely (message,category,filename,lineno,file=None,line=None):
- pyfits.showwarning(message,category,filename,lineno,file=file)
- warnings.showwarning = showwarning_damn_you_pyfits_damn_you_sincerely
+ # leaving this here for backwards compatibility
+ from astropy.io import fits as pyfits
return pyfits
diff --git a/Tigger/bin/tigger-convert b/Tigger/bin/tigger-convert
index 4153673..92a0bea 100755
--- a/Tigger/bin/tigger-convert
+++ b/Tigger/bin/tigger-convert
@@ -27,11 +27,10 @@
#
import sys
-import pyfits
+from astropy.io import fits as pyfits
import re
import os.path
import glob
-import pyfits
import math
import numpy
import traceback
@@ -504,7 +503,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
if [ src.name for src in sources if src.name == name ]:
print "Error: model already contains a source named '%s'"%name;
# add brick
- import pyfits
+ from astropy.io import fits as pyfits
from astLib.astWCS import WCS
input_hdu = pyfits.open(fitsfile)[0];
hdr = input_hdu.header;
diff --git a/Tigger/bin/tigger-make-brick b/Tigger/bin/tigger-make-brick
index 1e6a5be..e00aaae 100755
--- a/Tigger/bin/tigger-make-brick
+++ b/Tigger/bin/tigger-make-brick
@@ -27,7 +27,7 @@
#
import os.path
-import pyfits
+from astropy.io import fits as pyfits
import Tigger
import math
from math import *
diff --git a/Tigger/bin/tigger-restore b/Tigger/bin/tigger-restore
index 8fcefca..045944a 100755
--- a/Tigger/bin/tigger-restore
+++ b/Tigger/bin/tigger-restore
@@ -27,11 +27,10 @@
#
import sys
-import pyfits
+from astropy.io import fits as pyfits
import re
import os.path
import os
-import pyfits
import math
if __name__ == '__main__':
diff --git a/Tigger/bin/tigger-tag b/Tigger/bin/tigger-tag
index b5a7c21..faf8e7f 100755
--- a/Tigger/bin/tigger-tag
+++ b/Tigger/bin/tigger-tag
@@ -27,10 +27,9 @@
#
import sys
-import pyfits
+from astropy.io import fits as pyfits
import re
import os.path
-import pyfits
import math
import numpy
import traceback
diff --git a/setup.py b/setup.py
index df350c3..b2df9de 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
__version__ = "1.4.2"
-requirements = ['astro_kittens', 'numpy', 'scipy', 'astlib', 'pyfits']
+requirements = ['astro_kittens', 'numpy', 'scipy', 'astlib', 'astropy']
scripts = [
'Tigger/bin/tigger-convert',
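
Patch 02 swaps every pyfits import for astropy.io.fits while keeping the pyfits name, which works because astropy ships the old pyfits API largely unchanged. A minimal sketch of the drop-in pattern (the file name is illustrative):

    from astropy.io import fits as pyfits    # same API surface as the retired pyfits package

    hdulist = pyfits.open("example.fits")    # hypothetical input file
    header = hdulist[0].header
    print("image dimensions: %s" % header.get("NAXIS"))
    hdulist.close()
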
From 403db22d153929e28fab0544e976957953456159 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Thu, 5 Apr 2018 17:04:07 +0200
Subject: [PATCH 03/13] add test
---
.dockerignore | 4 ++++
.travis.yml | 2 +-
Dockerfile | 17 ++++++++---------
Makefile | 14 --------------
4 files changed, 13 insertions(+), 24 deletions(-)
create mode 100644 .dockerignore
delete mode 100644 Makefile
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..b4806da
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,4 @@
+.git
+.gitignore
+.idea/
+.venv2/
diff --git a/.travis.yml b/.travis.yml
index d6c0107..0418e12 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ services:
python:
- '2.7'
install:
-- docker build . -t tigger-lsm
+- docker build . -t ska-sa/tigger-lsm
script:
- true
deploy:
diff --git a/Dockerfile b/Dockerfile
index 706a681..18cf456 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,8 @@
-FROM kernsuite/base:2
-
-MAINTAINER gijsmolenaar@gmail.com
-
-ADD . /tmp/tigger-lsm
-
-RUN pip install /tmp/tigger-lsm
-
-CMD /usr/local/bin/tigger-convert
+FROM kernsuite/base:3
+RUN docker-apt-install python-pip
+RUN docker-apt-install python-setuptools python-numpy python-scipy python-astropy python-kittens
+ADD . /code
+RUN pip install /code
+RUN /usr/local/bin/tigger-convert /code/test/3C147-HI6.refmodel.lsm /tmp/output.txt
+RUN echo "the next command should not print 1"
+RUN wc -l /tmp/output.txt
diff --git a/Makefile b/Makefile
deleted file mode 100644
index fd9f624..0000000
--- a/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-DOCKER_REPO=radioastro/tigger:1.3.3
-
-.PHONY: build clean
-
-all: build
-
-build:
- docker build -t ${DOCKER_REPO} .
-
-clean:
- docker rmi ${DOCKER_REPO}
-
-upload: build
- docker push ${DOCKER_REPO}
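
The rebuilt Dockerfile above doubles as a smoke test: tigger-convert is run on the bundled .lsm model and the line count of the converted output is printed, though nothing currently fails the build when the output is empty. A sketch of an explicit check along the same lines (assumed paths, not part of the patch):

    import sys

    with open("/tmp/output.txt") as f:    # output written by tigger-convert in the build step
        lines = sum(1 for _ in f)
    print("converted model has %d lines" % lines)
    if lines <= 1:                        # only a header (or nothing) means the conversion failed
        sys.exit("tigger-convert produced no sources")
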
From d06e036b695057e9bc724931267d0b26939e3472 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 6 Apr 2018 11:25:58 +0200
Subject: [PATCH 04/13] remove all import *
---
Tigger/Models/SkyModel.py | 4 ++--
Tigger/SiameseInterface.py | 8 ++++----
Tigger/Tools/gaussfitter2.py | 24 ++++++++++++------------
Tigger/bin/tigger-convert | 1 -
Tigger/bin/tigger-make-brick | 3 ++-
Tigger/bin/tigger-restore | 1 -
6 files changed, 20 insertions(+), 21 deletions(-)
diff --git a/Tigger/Models/SkyModel.py b/Tigger/Models/SkyModel.py
index ddd2c95..48671d9 100644
--- a/Tigger/Models/SkyModel.py
+++ b/Tigger/Models/SkyModel.py
@@ -24,7 +24,7 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-from ModelClasses import *
+from ModelClasses import ModelItem
import PlotStyles
import re
@@ -48,7 +48,7 @@ def add (self,tag):
def get (self,tagname):
"""Returns ModelTag object associated with tag name, inserting a new one if not found""";
- return self.tags.setdefault(name,ModelTag(name));
+ return self.tags.setdefault(tagname,ModelTag(tagname));
def getAll (self):
all = self.tags.values();
diff --git a/Tigger/SiameseInterface.py b/Tigger/SiameseInterface.py
index 0d5ec2c..ba776f1 100644
--- a/Tigger/SiameseInterface.py
+++ b/Tigger/SiameseInterface.py
@@ -23,9 +23,10 @@
# along with this program; if not, see ,
# or write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-from Timba.TDL import *
+import sys
+
+from Timba.TDL import TDLCompileOptions, TDLRuntimeOptions, TDLOption, TDLFileSelect, TDLMenu
from Timba.utils import curry
import traceback
import Meow
@@ -33,7 +34,6 @@
import Meow.Context
import Meow.ParmGroup
import math
-from math import *
import os.path
from Meow.MeqMaker import SourceSubsetSelector
@@ -77,7 +77,7 @@ def compile_options (self):
"""Returns list of compile-time options""";
if not self._compile_opts:
self._compile_opts = [
- TDLOption("filename","Tigger LSM file",
+ TDLRuntimeOptions("filename","Tigger LSM file",
TDLFileSelect("Tigger models (*."+ModelHTML.DefaultExtension+");;All files (*)",default=self.filename,exist=True),
namespace=self),
TDLOption('lsm_subset',"Source subset",["all"],more=str,namespace=self,
diff --git a/Tigger/Tools/gaussfitter2.py b/Tigger/Tools/gaussfitter2.py
index c2366a1..ac02aba 100644
--- a/Tigger/Tools/gaussfitter2.py
+++ b/Tigger/Tools/gaussfitter2.py
@@ -26,7 +26,7 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-from numpy import *
+import numpy as np
from scipy import optimize
from scipy import stats
@@ -36,13 +36,13 @@ def moments (data,circle,rotate,vheight):
moments. Depending on the input parameters, will only output
a subset of the above"""
total = data.sum()
- X, Y = indices(data.shape)
+ X, Y = np.indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
col = data[:, int(y)]
- width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())
+ width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())
row = data[int(x), :]
- width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())
+ width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
width = ( width_x + width_y ) / 2.
height = stats.mode(data.ravel())[0][0] if vheight else 0;
amplitude = data.max()-height
@@ -100,9 +100,9 @@ def twodgaussian(inpars, circle, rotate, vheight):
width_y = float(width_y)
if rotate == 1:
rota = inpars.pop(0)
- rota = pi/180. * float(rota)
- rcen_x = center_x * cos(rota) - center_y * sin(rota)
- rcen_y = center_x * sin(rota) + center_y * cos(rota)
+ rota = np.pi/180. * float(rota)
+ rcen_x = center_x * np.cos(rota) - center_y * np.sin(rota)
+ rcen_y = center_x * np.sin(rota) + center_y * np.cos(rota)
else:
rcen_x = center_x
rcen_y = center_y
@@ -112,12 +112,12 @@ def twodgaussian(inpars, circle, rotate, vheight):
def rotgauss(x,y):
if rotate==1:
- xp = x * cos(rota) - y * sin(rota)
- yp = x * sin(rota) + y * cos(rota)
+ xp = x * np.cos(rota) - y * np.sin(rota)
+ yp = x * np.sin(rota) + y * np.cos(rota)
else:
xp = x
yp = y
- g = height+amplitude*exp(
+ g = height+amplitude*np.exp(
-(((rcen_x-xp)/width_x)**2+
((rcen_y-yp)/width_y)**2)/2.)
return g
@@ -156,9 +156,9 @@ def gaussfit(data,err=None,params=[],autoderiv=1,return_all=0,circle=0,rotate=1,
if params == []:
params = (moments(data,circle,rotate,vheight))
if err == None:
- errorfunction = lambda p: ravel((twodgaussian(p,circle,rotate,vheight)(*indices(data.shape)) - data))
+ errorfunction = lambda p: np.ravel((twodgaussian(p,circle,rotate,vheight)(*np.indices(data.shape)) - data))
else:
- errorfunction = lambda p: ravel((twodgaussian(p,circle,rotate,vheight)(*indices(data.shape)) - data)/err)
+ errorfunction = lambda p: np.ravel((twodgaussian(p,circle,rotate,vheight)(*np.indices(data.shape)) - data)/err)
if autoderiv == 0:
# the analytic derivative, while not terribly difficult, is less efficient and useful. I only bothered
# putting it here because I was instructed to do so for a class project - please ask if you would like
diff --git a/Tigger/bin/tigger-convert b/Tigger/bin/tigger-convert
index 92a0bea..94601f5 100755
--- a/Tigger/bin/tigger-convert
+++ b/Tigger/bin/tigger-convert
@@ -684,7 +684,6 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
else: #else, assume pb is an expession
try:
- from math import *
pbexp = eval('lambda r,fq:'+pb);
dum = pbexp(0,1e+9); # evaluate at r=0 and 1 GHz as a test
if not isinstance(dum,float):
diff --git a/Tigger/bin/tigger-make-brick b/Tigger/bin/tigger-make-brick
index e00aaae..7cbdffd 100755
--- a/Tigger/bin/tigger-make-brick
+++ b/Tigger/bin/tigger-make-brick
@@ -26,11 +26,12 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+import sys
import os.path
from astropy.io import fits as pyfits
import Tigger
import math
-from math import *
+from math import cos
from astLib.astWCS import WCS
DEG = math.pi/180;
diff --git a/Tigger/bin/tigger-restore b/Tigger/bin/tigger-restore
index 045944a..1dd6cdb 100755
--- a/Tigger/bin/tigger-restore
+++ b/Tigger/bin/tigger-restore
@@ -190,7 +190,6 @@ an output image is not specified, makes a name for it automatically.""");
if options.pb and model.primaryBeam():
try:
- from math import *
pbexp = eval('lambda r,fq:'+model.primaryBeam());
dum = pbexp(0,1e+9); # evaluate at r=0 and 1 GHz as a test
if not isinstance(dum,float):
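
The hunks above drop "from math import *" right before the primary-beam expression is compiled with eval('lambda r,fq:'+pb), so bare names like cos or sqrt in a user-supplied expression no longer resolve from the script's namespace. A sketch of passing the math names to eval explicitly instead (the beam expression is a made-up example):

    import math

    pb = "cos(min(r, 1.0))**6"                  # hypothetical user-supplied beam expression
    names = dict((n, getattr(math, n)) for n in dir(math) if not n.startswith("_"))
    pbexp = eval("lambda r, fq: " + pb, names)  # the lambda sees the math names as its globals
    print("pb(0, 1 GHz) = %s" % pbexp(0, 1e9))  # evaluate at r=0 and 1 GHz as a test
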
From b7db7a5890e5e1f606fe25e367d1b6f3657e9b82 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 6 Apr 2018 11:30:55 +0200
Subject: [PATCH 05/13] pythonize
---
Tigger/Coordinates.py | 534 +++----
Tigger/Models/Formats/AIPSCC.py | 137 +-
Tigger/Models/Formats/AIPSCCFITS.py | 137 +-
Tigger/Models/Formats/ASCII.py | 846 ++++++------
Tigger/Models/Formats/BBS.py | 704 +++++-----
Tigger/Models/Formats/ModelHTML.py | 320 ++---
Tigger/Models/Formats/NEWSTAR.py | 618 ++++-----
Tigger/Models/Formats/PyBDSMGaul.py | 113 +-
Tigger/Models/Formats/__init__.py | 191 +--
Tigger/Models/ModelClasses.py | 835 +++++------
Tigger/Models/PlotStyles.py | 145 +-
Tigger/Models/SkyModel.py | 833 +++++------
Tigger/Models/__init__.py | 3 +-
Tigger/SiameseInterface.py | 433 +++---
Tigger/Tools/FITSHeaders.py | 24 +-
Tigger/Tools/Imaging.py | 1009 +++++++-------
Tigger/Tools/__init__.py | 2 +-
Tigger/Tools/gaussfitter2.py | 68 +-
Tigger/__init__.py | 53 +-
Tigger/bin/tigger-convert | 1994 ++++++++++++++-------------
Tigger/bin/tigger-make-brick | 394 +++---
Tigger/bin/tigger-restore | 348 ++---
Tigger/bin/tigger-tag | 654 ++++-----
23 files changed, 5271 insertions(+), 5124 deletions(-)
diff --git a/Tigger/Coordinates.py b/Tigger/Coordinates.py
index a792b0e..c3ccae5 100644
--- a/Tigger/Coordinates.py
+++ b/Tigger/Coordinates.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,129 +26,137 @@
import Tigger
from Tigger import startup_dprint
-startup_dprint(1,"start of Coordinates");
-import sys
+startup_dprint(1, "start of Coordinates")
+
import math
import numpy
-from numpy import sin,cos,arcsin,arccos;
-startup_dprint(1,"imported numpy");
+from numpy import sin, cos
+startup_dprint(1, "imported numpy")
-import Kittens.utils
from astropy.io import fits as pyfits
-startup_dprint(1,"imported pyfits");
+startup_dprint(1, "imported pyfits")
-DEG = math.pi/180;
+DEG = math.pi / 180
-startup_dprint(1,"importing WCS");
+startup_dprint(1, "importing WCS")
# If we're being imported outside the main app (e.g. a script is trying to read a Tigger model,
# whether TDL or otherwise), then pylab may be needed by that script for decent God-fearing
# purposes. Since WCS is going to pull it in anyway, we try to import it here, and if that
# fails, replace it by dummies.
if not Tigger.matplotlib_nuked:
- try:
- import pylab;
- except:
- Tigger.nuke_matplotlib();
+ try:
+ import pylab
+ except:
+ Tigger.nuke_matplotlib()
# some locales cause WCS to complain that "." is not the decimal separator, so reset it to "C"
import locale
+
locale.setlocale(locale.LC_NUMERIC, 'C')
-
try:
- from astLib.astWCS import WCS
- import PyWCSTools.wcs
+ from astLib.astWCS import WCS
+ import PyWCSTools.wcs
except ImportError:
- print "Failed to import the astLib.astWCS and/or PyWCSTools module. Please install the astLib package (http://astlib.sourceforge.net/)."
- raise;
-
-startup_dprint(1,"imported WCS");
-
-def angular_dist_pos_angle (ra1,dec1,ra2,dec2):
- """Computes the angular distance between the two points on a sphere, and
- the position angle (North through East) of the direction from 1 to 2.""";
- # I lifted this somewhere
- sind1,sind2 = sin(dec1),sin(dec2);
- cosd1,cosd2 = cos(dec1),cos(dec2);
- cosra,sinra = cos(ra1-ra2),sin(ra1-ra2);
-
- adist = numpy.arccos(min(sind1*sind2 + cosd1*cosd2*cosra,1));
- pa = numpy.arctan2(-cosd2*sinra,-cosd2*sind1*cosra+sind2*cosd1);
- return adist,pa;
-
-def angular_dist_pos_angle2 (ra1,dec1,ra2,dec2):
- """Computes the angular distance between the two points on a sphere, and
- the position angle (North through East) of the direction from 1 to 2.""";
- # I re-derived this from Euler angles, but it seems to be identical to the above
- ra = ra2 - ra1;
- sind0,sind,cosd0,cosd = sin(dec1),sin(dec2),cos(dec1),cos(dec2);
- sina,cosa = sin(ra)*cosd,cos(ra)*cosd;
- x = cosa*sind0 - sind*cosd0;
- y = sina;
- z = cosa*cosd0 + sind*sind0;
- print x,y,z;
- PA = numpy.arctan2(y,-x);
- R = numpy.arccos(z);
-
- return R,PA;
-
-def angular_dist_pos_angle2 (ra1,dec1,ra2,dec2):
- """Computes the angular distance between the two points on a sphere, and
- the position angle (North through East) of the direction from 1 to 2.""";
- # I re-derived this from Euler angles, but it seems to be identical to the above
- ra = ra2 - ra1;
- sind0,sind,cosd0,cosd = sin(dec1),sin(dec2),cos(dec1),cos(dec2);
- sina,cosa = sin(ra)*cosd,cos(ra)*cosd;
- x = cosa*sind0 - sind*cosd0;
- y = sina;
- z = cosa*cosd0 + sind*sind0;
- print x,y,z;
- PA = numpy.arctan2(y,-x);
- R = numpy.arccos(z);
- return R,PA;
-
-
-
-def _deg_to_dms (x,prec=0.01):
- """Converts x (in degrees) into d,m,s tuple, where d and m are ints.
- prec gives the precision, in arcseconds."""
- mins,secs = divmod(round(x*3600/prec)*prec,60);
- mins = int(mins);
- degs,mins = divmod(mins,60);
- return degs,mins,secs;
-
-def ra_hms (rad,scale=12,prec=0.01):
- """Returns RA as tuple of (h,m,s)""";
- # convert negative values
- while rad < 0:
- rad += 2*math.pi;
- # convert to hours
- rad *= scale/math.pi;
- return _deg_to_dms(rad,prec);
-
-def dec_dms (rad,prec=0.01):
- return dec_sdms(rad,prec)[1:];
-
-def dec_sdms (rad,prec=0.01):
- """Returns Dec as tuple of (sign,d,m,s). Sign is "+" or "-".""";
- sign = "-" if rad<0 else "+";
- d,m,s = _deg_to_dms(abs(rad)/DEG,prec);
- return (sign,d,m,s);
-
-def ra_hms_string (rad):
- return "%dh%02dm%05.2fs"%ra_hms(rad);
-
-def dec_sdms_string (rad):
- return "%s%dd%02dm%05.2fs"%dec_sdms(rad);
-
-def radec_string (ra,dec):
- return "%s %s"%(ra_hms_string(ra),dec_sdms_string(dec));
-
-class _Projector (object):
+ print "Failed to import the astLib.astWCS and/or PyWCSTools module. Please install the astLib package (http://astlib.sourceforge.net/)."
+ raise
+
+startup_dprint(1, "imported WCS")
+
+
+def angular_dist_pos_angle(ra1, dec1, ra2, dec2):
+ """Computes the angular distance between the two points on a sphere, and
+ the position angle (North through East) of the direction from 1 to 2."""
+ # I lifted this somewhere
+ sind1, sind2 = sin(dec1), sin(dec2)
+ cosd1, cosd2 = cos(dec1), cos(dec2)
+ cosra, sinra = cos(ra1 - ra2), sin(ra1 - ra2)
+
+ adist = numpy.arccos(min(sind1 * sind2 + cosd1 * cosd2 * cosra, 1))
+ pa = numpy.arctan2(-cosd2 * sinra, -cosd2 * sind1 * cosra + sind2 * cosd1)
+ return adist, pa
+
+
+def angular_dist_pos_angle2(ra1, dec1, ra2, dec2):
+ """Computes the angular distance between the two points on a sphere, and
+ the position angle (North through East) of the direction from 1 to 2."""
+ # I re-derived this from Euler angles, but it seems to be identical to the above
+ ra = ra2 - ra1
+ sind0, sind, cosd0, cosd = sin(dec1), sin(dec2), cos(dec1), cos(dec2)
+ sina, cosa = sin(ra) * cosd, cos(ra) * cosd
+ x = cosa * sind0 - sind * cosd0
+ y = sina
+ z = cosa * cosd0 + sind * sind0
+ print x, y, z
+ PA = numpy.arctan2(y, -x)
+ R = numpy.arccos(z)
+
+ return R, PA
+
+
+def angular_dist_pos_angle2(ra1, dec1, ra2, dec2):
+ """Computes the angular distance between the two points on a sphere, and
+ the position angle (North through East) of the direction from 1 to 2."""
+ # I re-derived this from Euler angles, but it seems to be identical to the above
+ ra = ra2 - ra1
+ sind0, sind, cosd0, cosd = sin(dec1), sin(dec2), cos(dec1), cos(dec2)
+ sina, cosa = sin(ra) * cosd, cos(ra) * cosd
+ x = cosa * sind0 - sind * cosd0
+ y = sina
+ z = cosa * cosd0 + sind * sind0
+ print x, y, z
+ PA = numpy.arctan2(y, -x)
+ R = numpy.arccos(z)
+ return R, PA
+
+
+def _deg_to_dms(x, prec=0.01):
+ """Converts x (in degrees) into d,m,s tuple, where d and m are ints.
+ prec gives the precision, in arcseconds."""
+ mins, secs = divmod(round(x * 3600 / prec) * prec, 60)
+ mins = int(mins)
+ degs, mins = divmod(mins, 60)
+ return degs, mins, secs
+
+
+def ra_hms(rad, scale=12, prec=0.01):
+ """Returns RA as tuple of (h,m,s)"""
+ # convert negative values
+ while rad < 0:
+ rad += 2 * math.pi
+ # convert to hours
+ rad *= scale / math.pi
+ return _deg_to_dms(rad, prec)
+
+
+def dec_dms(rad, prec=0.01):
+ return dec_sdms(rad, prec)[1:]
+
+
+def dec_sdms(rad, prec=0.01):
+ """Returns Dec as tuple of (sign,d,m,s). Sign is "+" or "-"."""
+ sign = "-" if rad < 0 else "+"
+ d, m, s = _deg_to_dms(abs(rad) / DEG, prec)
+ return (sign, d, m, s)
+
+
+def ra_hms_string(rad):
+ return "%dh%02dm%05.2fs" % ra_hms(rad)
+
+
+def dec_sdms_string(rad):
+ return "%s%dd%02dm%05.2fs" % dec_sdms(rad)
+
+
+def radec_string(ra, dec):
+ return "%s %s" % (ra_hms_string(ra), dec_sdms_string(dec))
+
+
+class _Projector(object):
"""This is an abstract base class for all projection classes below. A projection class can be used to create projector objects for
conversion between world (ra,dec) and projected (l,m) coordinates.
@@ -158,7 +166,7 @@ class _Projector (object):
* converts l,m->ra,dec as
ra,dec = proj.radec(l,m)
* converts angular offsets (from 0,0 point) into l,m:
- l,m = proj.offset(dra,ddec);
+ l,m = proj.offset(dra,ddec)
Alternativelty, there are class methods which do not require one to instantiate a projector object:
@@ -166,169 +174,175 @@ class _Projector (object):
* Proj.lm_radec(l,m,ra0,dec0)
* Proj.offset_lm(dra,ddec,ra0,dec0)
"""
- def __init__ (self,ra0,dec0,has_projection=False):
- self.ra0,self.dec0,self.sin_dec0,self.cos_dec0 = ra0,dec0,sin(dec0),cos(dec0);
- self._has_projection = has_projection;
- def has_projection (self):
- return bool(self._has_projection);
+ def __init__(self, ra0, dec0, has_projection=False):
+ self.ra0, self.dec0, self.sin_dec0, self.cos_dec0 = ra0, dec0, sin(dec0), cos(dec0)
+ self._has_projection = has_projection
- def __eq__ (self,other):
- """By default, two projections are the same if their classes match, and their ra0/dec0 match."""
- return type(self) is type(other) and self.ra0 == other.ra0 and self.dec0 == other.dec0;
+ def has_projection(self):
+ return bool(self._has_projection)
- def __ne__ (self,other):
- return not self == other;
+ def __eq__(self, other):
+ """By default, two projections are the same if their classes match, and their ra0/dec0 match."""
+ return type(self) is type(other) and self.ra0 == other.ra0 and self.dec0 == other.dec0
+
+ def __ne__(self, other):
+ return not self == other
@classmethod
- def radec_lm (cls,ra,dec,ra0,dec0):
- return cls(ra0,dec0).lm(ra,dec);
+ def radec_lm(cls, ra, dec, ra0, dec0):
+ return cls(ra0, dec0).lm(ra, dec)
@classmethod
- def lm_radec (cls,l,m,ra0,dec0):
- return cls(ra0,dec0).radec(l,m);
+ def lm_radec(cls, l, m, ra0, dec0):
+ return cls(ra0, dec0).radec(l, m)
@classmethod
- def offset_lm (cls,dra,ddec,ra0,dec0):
- return cls(ra0,dec0).offset(dra,ddec);
-
- def lm (self,ra,dec):
- raise TypeError,"lm() not yet implemented in projection %s"%type(self).__name__;
-
- def offset (self,dra,ddec):
- raise TypeError,"offset() not yet implemented in projection %s"%type(self).__name__;
-
- def radec (self,l,m):
- raise TypeError,"radec() not yet implemented in projection %s"%type(self).__name__;
-
-class Projection (object):
- """Projection is a container for the different projection classes.
- Each Projection class can be used to create a projection object: proj = Proj(ra0,dec0), with lm(ra,dec) and radec(l,m) methods.
- """;
-
- class FITSWCSpix (_Projector):
- """FITS WCS projection, as determined by a FITS header. lm is in pixels (0-based)."""
- def __init__ (self,header):
- """Constructor. Create from filename (treated as FITS file), or a FITS header object""";
- # attach to FITS file or header
- if isinstance(header,str):
- header = pyfits.open(header)[0].header;
- else:
- self.wcs = WCS(header,mode="pyfits");
- try:
- ra0,dec0 = self.wcs.getCentreWCSCoords();
- self.xpix0,self.ypix0 = self.wcs.wcs2pix(*self.wcs.getCentreWCSCoords());
- self.xscale = self.wcs.getXPixelSizeDeg()*DEG;
- self.yscale = self.wcs.getYPixelSizeDeg()*DEG;
- has_projection = True;
- except:
- print "No WCS in FITS file, falling back to pixel coordinates.";
- ra0 = dec0 = self.xpix0 = self.ypix0 = 0;
- self.xscale = self.yscale = DEG/3600;
- has_projection = False;
- _Projector.__init__(self,ra0*DEG,dec0*DEG,has_projection=has_projection);
-
- def lm (self,ra,dec):
- if not self.has_projection():
- return numpy.sin(ra)/self.xscale,numpy.sin(dec)/self.yscale;
- if numpy.isscalar(ra) and numpy.isscalar(dec):
- if ra - self.ra0 > math.pi:
- ra -= 2*math.pi;
- if ra - self.ra0 < -math.pi:
- ra += 2*math.pi;
- return self.wcs.wcs2pix(ra/DEG,dec/DEG);
- else:
- if numpy.isscalar(ra):
- ra = numpy.array(ra);
- ra[ra - self.ra0 > math.pi] -= 2*math.pi;
- ra[ra - self.ra0 < -math.pi] += 2*math.pi;
- ## when fed in arrays of ra/dec, wcs.wcs2pix will return a nested list of
- ## [[l1,m1],[l2,m2],,...]. Convert this to an array and extract columns.
- lm = numpy.array(self.wcs.wcs2pix(ra/DEG,dec/DEG));
- return lm[...,0],lm[...,1];
-
- def radec (self,l,m):
- if not self.has_projection():
- return numpy.arcsin(l*self.xscale),numpy.arcsin(m*self.yscale);
- if numpy.isscalar(l) and numpy.isscalar(m):
- ra,dec = self.wcs.pix2wcs(l,m);
- else:
-## this is slow as molasses because of the way astLib.WCS implements the loop. ~120 seconds for 4M pixels
- ## when fed in arrays of ra/dec, wcs.wcs2pix will return a nested list of
- ## [[l1,m1],[l2,m2],,...]. Convert this to an array and extract columns.
-# radec = numpy.array(self.wcs.pix2wcs(l,m));
-# ra = radec[...,0];
-# dec = radec[...,1];
-### try a faster implementation -- oh well, only a bit faster, ~95 seconds for the same
-### can also replace list comprehension with map(), but that doesn't improve things.
-### Note also that the final array constructor takes ~10 secs!
- radec = numpy.array([ PyWCSTools.wcs.pix2wcs(self.wcs.WCSStructure,x,y) for x,y in zip(l+1,m+1) ]);
- ra = radec[...,0];
- dec = radec[...,1];
- return ra*DEG,dec*DEG;
-
-
- def offset (self,dra,ddec):
- return self.xpix0 - dra/self.xscale,self.ypix0 + ddec/self.xscale;
-
- def __eq__ (self,other):
- """By default, two projections are the same if their classes match, and their ra0/dec0 match."""
- return type(self) is type(other) and (self.ra0,self.dec0,self.xpix0,self.ypix0,self.xscale,self.yscale) == (other.ra0,other.dec0,other.xpix0,other.ypix0,other.xscale,other.yscale);
-
- class FITSWCS (FITSWCSpix):
- """FITS WCS projection, as determined by a FITS header. lm is renormalized to radians, l is reversed, 0,0 is at reference pixel."""
- def __init__ (self,header):
- """Constructor. Create from filename (treated as FITS file), or a FITS header object""";
- Projection.FITSWCSpix.__init__(self,header);
-
- def lm (self,ra,dec):
- if not self.has_projection():
- return -numpy.sin(ra)/self.xscale,numpy.sin(dec)/self.yscale;
- if numpy.isscalar(ra) and numpy.isscalar(dec):
- if ra - self.ra0 > math.pi:
- ra -= 2*math.pi;
- if ra - self.ra0 < -math.pi:
- ra += 2*math.pi;
- l,m = self.wcs.wcs2pix(ra/DEG,dec/DEG);
- else:
- if numpy.isscalar(ra):
- ra = numpy.array(ra);
- ra[ra - self.ra0 > math.pi] -= 2*math.pi;
- ra[ra - self.ra0 < -math.pi] += 2*math.pi;
- lm = numpy.array(self.wcs.wcs2pix(ra/DEG,dec/DEG));
- l,m = lm[...,0],lm[...,1];
- l = (self.xpix0-l)*self.xscale;
- m = (m-self.ypix0)*self.yscale;
- return l,m;
-
- def radec (self,l,m):
- if not self.has_projection():
- return numpy.arcsin(-l),numpy.arcsin(m);
- if numpy.isscalar(l) and numpy.isscalar(m):
- ra,dec = self.wcs.pix2wcs(self.xpix0-l/self.xscale,self.ypix0+m/self.yscale);
- else:
- radec = numpy.array(self.wcs.pix2wcs(self.xpix0-l/self.xscale,self.ypix0+m/self.yscale));
- ra = radec[...,0];
- dec = radec[...,1];
- return ra*DEG,dec*DEG;
-
- def offset (self,dra,ddec):
- return dra,ddec;
-
- @staticmethod
- def SinWCS (ra0,dec0):
- hdu = pyfits.PrimaryHDU();
- hdu.header.set('NAXIS',2);
- hdu.header.set('NAXIS1',3);
- hdu.header.set('NAXIS2',3);
- hdu.header.set('CTYPE1','RA---SIN');
- hdu.header.set('CDELT1',-1./60);
- hdu.header.set('CRPIX1',2);
- hdu.header.set('CRVAL1',ra0/DEG);
- hdu.header.set('CUNIT1','deg ');
- hdu.header.set('CTYPE2','DEC--SIN');
- hdu.header.set('CDELT2',1./60);
- hdu.header.set('CRPIX2',2);
- hdu.header.set('CRVAL2',dec0/DEG);
- hdu.header.set('CUNIT2','deg ');
- return Projection.FITSWCS(hdu.header);
+ def offset_lm(cls, dra, ddec, ra0, dec0):
+ return cls(ra0, dec0).offset(dra, ddec)
+
+ def lm(self, ra, dec):
+ raise TypeError, "lm() not yet implemented in projection %s" % type(self).__name__
+
+ def offset(self, dra, ddec):
+ raise TypeError, "offset() not yet implemented in projection %s" % type(self).__name__
+
+ def radec(self, l, m):
+ raise TypeError, "radec() not yet implemented in projection %s" % type(self).__name__
+
+
+class Projection(object):
+ """Projection is a container for the different projection classes.
+ Each Projection class can be used to create a projection object: proj = Proj(ra0,dec0), with lm(ra,dec) and radec(l,m) methods.
+ """
+
+ class FITSWCSpix(_Projector):
+ """FITS WCS projection, as determined by a FITS header. lm is in pixels (0-based)."""
+
+ def __init__(self, header):
+ """Constructor. Create from filename (treated as FITS file), or a FITS header object"""
+ # attach to FITS file or header
+ if isinstance(header, str):
+ header = pyfits.open(header)[0].header
+ else:
+ self.wcs = WCS(header, mode="pyfits")
+ try:
+ ra0, dec0 = self.wcs.getCentreWCSCoords()
+ self.xpix0, self.ypix0 = self.wcs.wcs2pix(*self.wcs.getCentreWCSCoords())
+ self.xscale = self.wcs.getXPixelSizeDeg() * DEG
+ self.yscale = self.wcs.getYPixelSizeDeg() * DEG
+ has_projection = True
+ except:
+ print "No WCS in FITS file, falling back to pixel coordinates."
+ ra0 = dec0 = self.xpix0 = self.ypix0 = 0
+ self.xscale = self.yscale = DEG / 3600
+ has_projection = False
+ _Projector.__init__(self, ra0 * DEG, dec0 * DEG, has_projection=has_projection)
+
+ def lm(self, ra, dec):
+ if not self.has_projection():
+ return numpy.sin(ra) / self.xscale, numpy.sin(dec) / self.yscale
+ if numpy.isscalar(ra) and numpy.isscalar(dec):
+ if ra - self.ra0 > math.pi:
+ ra -= 2 * math.pi
+ if ra - self.ra0 < -math.pi:
+ ra += 2 * math.pi
+ return self.wcs.wcs2pix(ra / DEG, dec / DEG)
+ else:
+ if numpy.isscalar(ra):
+ ra = numpy.array(ra)
+ ra[ra - self.ra0 > math.pi] -= 2 * math.pi
+ ra[ra - self.ra0 < -math.pi] += 2 * math.pi
+ ## when fed in arrays of ra/dec, wcs.wcs2pix will return a nested list of
+ ## [[l1,m1],[l2,m2],,...]. Convert this to an array and extract columns.
+ lm = numpy.array(self.wcs.wcs2pix(ra / DEG, dec / DEG))
+ return lm[..., 0], lm[..., 1]
+
+ def radec(self, l, m):
+ if not self.has_projection():
+ return numpy.arcsin(l * self.xscale), numpy.arcsin(m * self.yscale)
+ if numpy.isscalar(l) and numpy.isscalar(m):
+ ra, dec = self.wcs.pix2wcs(l, m)
+ else:
+ ## this is slow as molasses because of the way astLib.WCS implements the loop. ~120 seconds for 4M pixels
+ ## when fed in arrays of ra/dec, wcs.wcs2pix will return a nested list of
+ ## [[l1,m1],[l2,m2],,...]. Convert this to an array and extract columns.
+ # radec = numpy.array(self.wcs.pix2wcs(l,m))
+ # ra = radec[...,0]
+ # dec = radec[...,1]
+ ### try a faster implementation -- oh well, only a bit faster, ~95 seconds for the same
+ ### can also replace list comprehension with map(), but that doesn't improve things.
+ ### Note also that the final array constructor takes ~10 secs!
+ radec = numpy.array(
+ [PyWCSTools.wcs.pix2wcs(self.wcs.WCSStructure, x, y) for x, y in zip(l + 1, m + 1)])
+ ra = radec[..., 0]
+ dec = radec[..., 1]
+ return ra * DEG, dec * DEG
+
+ def offset(self, dra, ddec):
+ return self.xpix0 - dra / self.xscale, self.ypix0 + ddec / self.xscale
+
+ def __eq__(self, other):
+ """By default, two projections are the same if their classes match, and their ra0/dec0 match."""
+ return type(self) is type(other) and (
+ self.ra0, self.dec0, self.xpix0, self.ypix0, self.xscale, self.yscale) == (
+ other.ra0, other.dec0, other.xpix0, other.ypix0, other.xscale, other.yscale)
+
+ class FITSWCS(FITSWCSpix):
+ """FITS WCS projection, as determined by a FITS header. lm is renormalized to radians, l is reversed, 0,0 is at reference pixel."""
+
+ def __init__(self, header):
+ """Constructor. Create from filename (treated as FITS file), or a FITS header object"""
+ Projection.FITSWCSpix.__init__(self, header)
+
+ def lm(self, ra, dec):
+ if not self.has_projection():
+ return -numpy.sin(ra) / self.xscale, numpy.sin(dec) / self.yscale
+ if numpy.isscalar(ra) and numpy.isscalar(dec):
+ if ra - self.ra0 > math.pi:
+ ra -= 2 * math.pi
+ if ra - self.ra0 < -math.pi:
+ ra += 2 * math.pi
+ l, m = self.wcs.wcs2pix(ra / DEG, dec / DEG)
+ else:
+ if numpy.isscalar(ra):
+ ra = numpy.array(ra)
+ ra[ra - self.ra0 > math.pi] -= 2 * math.pi
+ ra[ra - self.ra0 < -math.pi] += 2 * math.pi
+ lm = numpy.array(self.wcs.wcs2pix(ra / DEG, dec / DEG))
+ l, m = lm[..., 0], lm[..., 1]
+ l = (self.xpix0 - l) * self.xscale
+ m = (m - self.ypix0) * self.yscale
+ return l, m
+
+ def radec(self, l, m):
+ if not self.has_projection():
+ return numpy.arcsin(-l), numpy.arcsin(m)
+ if numpy.isscalar(l) and numpy.isscalar(m):
+ ra, dec = self.wcs.pix2wcs(self.xpix0 - l / self.xscale, self.ypix0 + m / self.yscale)
+ else:
+ radec = numpy.array(self.wcs.pix2wcs(self.xpix0 - l / self.xscale, self.ypix0 + m / self.yscale))
+ ra = radec[..., 0]
+ dec = radec[..., 1]
+ return ra * DEG, dec * DEG
+
+ def offset(self, dra, ddec):
+ return dra, ddec
+
+ @staticmethod
+ def SinWCS(ra0, dec0):
+ hdu = pyfits.PrimaryHDU()
+ hdu.header.set('NAXIS', 2)
+ hdu.header.set('NAXIS1', 3)
+ hdu.header.set('NAXIS2', 3)
+ hdu.header.set('CTYPE1', 'RA---SIN')
+ hdu.header.set('CDELT1', -1. / 60)
+ hdu.header.set('CRPIX1', 2)
+ hdu.header.set('CRVAL1', ra0 / DEG)
+ hdu.header.set('CUNIT1', 'deg ')
+ hdu.header.set('CTYPE2', 'DEC--SIN')
+ hdu.header.set('CDELT2', 1. / 60)
+ hdu.header.set('CRPIX2', 2)
+ hdu.header.set('CRVAL2', dec0 / DEG)
+ hdu.header.set('CUNIT2', 'deg ')
+ return Projection.FITSWCS(hdu.header)
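
SinWCS above builds a minimal SIN-projected FITS header around a given field centre and wraps it in a FITSWCS projector, which is how the model loaders further down obtain radial distances. A round-trip sketch (assumes astLib is installed; the offset is illustrative):

    import math
    from Tigger.Coordinates import Projection

    DEG = math.pi / 180
    proj = Projection.SinWCS(1.4948845339, 0.8700817014)    # test-model field centre, radians
    l, m = proj.lm(1.4948845339 + 0.1 * DEG, 0.8700817014)  # a source ~0.1 deg away in RA
    ra, dec = proj.radec(l, m)                               # back to world coordinates
    print("l=%.4f deg  m=%.4f deg" % (l / DEG, m / DEG))
    print("round-trip RA error: %.2g rad" % abs(ra - (1.4948845339 + 0.1 * DEG)))
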
diff --git a/Tigger/Models/Formats/AIPSCC.py b/Tigger/Models/Formats/AIPSCC.py
index a8417f1..3ceb951 100644
--- a/Tigger/Models/Formats/AIPSCC.py
+++ b/Tigger/Models/Formats/AIPSCC.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id: BBS.py 8378 2011-08-30 15:18:30Z oms $
+# % $Id: BBS.py 8378 2011-08-30 15:18:30Z oms $
#
#
# Copyright (C) 2002-2011
@@ -24,91 +24,82 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys
-import traceback
import math
-import struct
-import time
-import os.path
-import re
-
-import numpy
-
-import Kittens.utils
+from math import cos, sin, asin, atan2, sqrt
import Tigger.Models.Formats
+from Tigger import Coordinates
from Tigger.Models import ModelClasses
from Tigger.Models import SkyModel
-from Tigger import Coordinates
-from Tigger.Models.Formats import dprint,dprintf
-from math import cos,sin,acos,asin,atan2,sqrt,pi
+from Tigger.Models.Formats import dprint, dprintf
-ARCSEC = (math.pi/180)/(60*60);
+ARCSEC = (math.pi / 180) / (60 * 60)
"""
Loads an AIPS-format clean component list
"""
-def lm_to_radec (l,m,ra0,dec0):
- """Returns ra,dec corresponding to l,m w.r.t. direction ra0,dec0""";
- # see formula at http://en.wikipedia.org/wiki/Orthographic_projection_(cartography)
- rho = sqrt(l**2+m**2);
- if rho == 0.0:
- ra = ra0
- dec = dec0
- else:
- cc = asin(rho);
- ra = ra0 + atan2( l*sin(cc),rho*cos(dec0)*cos(cc)-m*sin(dec0)*sin(cc) );
- dec = asin( cos(cc)*sin(dec0) + m*sin(cc)*cos(dec0)/rho );
- return ra,dec;
+def lm_to_radec(l, m, ra0, dec0):
+ """Returns ra,dec corresponding to l,m w.r.t. direction ra0,dec0"""
+ # see formula at http://en.wikipedia.org/wiki/Orthographic_projection_(cartography)
+ rho = sqrt(l ** 2 + m ** 2)
+ if rho == 0.0:
+ ra = ra0
+ dec = dec0
+ else:
+ cc = asin(rho)
+ ra = ra0 + atan2(l * sin(cc), rho * cos(dec0) * cos(cc) - m * sin(dec0) * sin(cc))
+ dec = asin(cos(cc) * sin(dec0) + m * sin(cc) * cos(dec0) / rho)
+ return ra, dec
+
+
+def load(filename, center=None, **kw):
+ """Imports an AIPs clean component list file
+ """
+ srclist = []
+ dprint(1, "importing AIPS clean component table", filename)
+ # read file
+ ff = file(filename)
-def load (filename,center=None,**kw):
- """Imports an AIPs clean component list file
- """
- srclist = [];
- dprint(1,"importing AIPS clean component table",filename);
- # read file
- ff = file(filename);
-
- if center is None:
- raise ValueError,"field centre must be specified";
+ if center is None:
+ raise ValueError, "field centre must be specified"
- # now process file line-by-line
- linenum = 0;
- for line in ff:
- linenum += 1;
- # parse one line
- dprint(4,"read line:",line);
- ff = line.split();
- if len(ff) != 5:
- continue;
- try:
- num = int(ff[0]);
- dx,dy,i,i_tot = map(float,ff[1:]);
- except:
- continue;
- try:
- # convert dx/dy to real positions
- l,m = sin(dx*ARCSEC),sin(dy*ARCSEC);
- ra,dec = lm_to_radec(l,m,*center);
- pos = ModelClasses.Position(ra,dec);
- except Exception,exc:
- print "CC %d: error converting coordinates (%s), skipping"%(num,str(exc));
- continue;
- flux = ModelClasses.Flux(i);
- # now create a source object
- src = SkyModel.Source('cc%d'%num,pos,flux);
- src.setAttribute('r',math.sqrt(l*l+m*m));
- srclist.append(src);
- dprintf(2,"imported %d sources from file %s\n",len(srclist),filename);
- # create model
- model = ModelClasses.SkyModel(*srclist);
- # setup model center
- model.setFieldCenter(*center);
- # setup radial distances
- projection = Coordinates.Projection.SinWCS(*model.fieldCenter());
- return model;
+ # now process file line-by-line
+ linenum = 0
+ for line in ff:
+ linenum += 1
+ # parse one line
+ dprint(4, "read line:", line)
+ ff = line.split()
+ if len(ff) != 5:
+ continue
+ try:
+ num = int(ff[0])
+ dx, dy, i, i_tot = map(float, ff[1:])
+ except:
+ continue
+ try:
+ # convert dx/dy to real positions
+ l, m = sin(dx * ARCSEC), sin(dy * ARCSEC)
+ ra, dec = lm_to_radec(l, m, *center)
+ pos = ModelClasses.Position(ra, dec)
+ except Exception, exc:
+ print "CC %d: error converting coordinates (%s), skipping" % (num, str(exc))
+ continue
+ flux = ModelClasses.Flux(i)
+ # now create a source object
+ src = SkyModel.Source('cc%d' % num, pos, flux)
+ src.setAttribute('r', math.sqrt(l * l + m * m))
+ srclist.append(src)
+ dprintf(2, "imported %d sources from file %s\n", len(srclist), filename)
+ # create model
+ model = ModelClasses.SkyModel(*srclist)
+ # setup model center
+ model.setFieldCenter(*center)
+ # setup radial distances
+ projection = Coordinates.Projection.SinWCS(*model.fieldCenter())
+ return model
-Tigger.Models.Formats.registerFormat("AIPSCC",load,"AIPS CC list",(".cc",".CC"));
+Tigger.Models.Formats.registerFormat("AIPSCC", load, "AIPS CC list", (".cc", ".CC"))
diff --git a/Tigger/Models/Formats/AIPSCCFITS.py b/Tigger/Models/Formats/AIPSCCFITS.py
index 704b39a..ccff158 100644
--- a/Tigger/Models/Formats/AIPSCCFITS.py
+++ b/Tigger/Models/Formats/AIPSCCFITS.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id: BBS.py 8378 2011-08-30 15:18:30Z oms $
+# % $Id: BBS.py 8378 2011-08-30 15:18:30Z oms $
#
#
# Copyright (C) 2002-2011
@@ -24,46 +24,40 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys
-import traceback
import math
-import struct
-import time
-import os.path
-import re
+import sys
+from math import cos, sin, asin, atan2, sqrt
-import numpy
from astropy.io import fits as pyfits
-import Kittens.utils
-
import Tigger.Models.Formats
+from Tigger import Coordinates
from Tigger.Models import ModelClasses
from Tigger.Models import SkyModel
-from Tigger import Coordinates
-from Tigger.Models.Formats import dprint,dprintf
-from math import cos,sin,acos,asin,atan2,sqrt,pi
+from Tigger.Models.Formats import dprint, dprintf
-DEG = math.pi/180
-ARCMIN = DEG/60
-ARCSEC = ARCMIN/60
+DEG = math.pi / 180
+ARCMIN = DEG / 60
+ARCSEC = ARCMIN / 60
"""
Loads an AIPS-format clean component list
"""
-def lm_to_radec (l,m,ra0,dec0):
- """Returns ra,dec corresponding to l,m w.r.t. direction ra0,dec0""";
- # see formula at http://en.wikipedia.org/wiki/Orthographic_projection_(cartography)
- rho = sqrt(l**2+m**2);
- if rho == 0.0:
- ra = ra0
- dec = dec0
- else:
- cc = asin(rho);
- ra = ra0 + atan2( l*sin(cc),rho*cos(dec0)*cos(cc)-m*sin(dec0)*sin(cc) );
- dec = asin( cos(cc)*sin(dec0) + m*sin(cc)*cos(dec0)/rho );
- return ra,dec;
+
+def lm_to_radec(l, m, ra0, dec0):
+ """Returns ra,dec corresponding to l,m w.r.t. direction ra0,dec0"""
+ # see formula at http://en.wikipedia.org/wiki/Orthographic_projection_(cartography)
+ rho = sqrt(l ** 2 + m ** 2)
+ if rho == 0.0:
+ ra = ra0
+ dec = dec0
+ else:
+ cc = asin(rho)
+ ra = ra0 + atan2(l * sin(cc), rho * cos(dec0) * cos(cc) - m * sin(dec0) * sin(cc))
+ dec = asin(cos(cc) * sin(dec0) + m * sin(cc) * cos(dec0) / rho)
+ return ra, dec
+
_units = dict(DEG=DEG, DEGREE=DEG, DEGREES=DEG,
RAD=1, RADIAN=1, RADIANS=1,
@@ -71,46 +65,47 @@ def lm_to_radec (l,m,ra0,dec0):
ARCSEC=ARCSEC, ARCSECS=ARCSEC
)
-def load (filename,center=None,**kw):
- """Imports an AIPS clean component list from FITS table
- """
- srclist = [];
- dprint(1,"importing AIPS clean component FITS table",filename);
- # read file
- ff = pyfits.open(filename);
-
- if center is None:
- hdr = ff[0].header
- ra = hdr['CRVAL1'] * _units[hdr.get('CUNIT1','DEG').strip()]
- dec = hdr['CRVAL2'] * _units[hdr.get('CUNIT2','DEG').strip()]
-
- print "Using FITS image centre (%.4f, %.4f deg) as field centre" % (ra/DEG, dec/DEG)
- center = ra, dec
-
- # now process file line-by-line
- cclist = ff[1].data;
- hdr = ff[1].header
- ux = _units[hdr.get('TUNIT2','DEG').strip()]
- uy = _units[hdr.get('TUNIT3','DEG').strip()]
- for num,ccrec in enumerate(cclist):
- stokes_i,dx,dy = map(float,ccrec);
- # convert dx/dy to real positions
- l,m = sin(dx*ux), sin(dy*uy);
- ra,dec = lm_to_radec(l,m,*center);
- pos = ModelClasses.Position(ra,dec);
- flux = ModelClasses.Flux(stokes_i);
- # now create a source object
- src = SkyModel.Source('cc%d'%num,pos,flux);
- src.setAttribute('r',math.sqrt(l*l+m*m));
- srclist.append(src);
- dprintf(2,"imported %d sources from file %s\n",len(srclist),filename);
- # create model
- model = ModelClasses.SkyModel(*srclist);
- # setup model center
- model.setFieldCenter(*center);
- # setup radial distances
- projection = Coordinates.Projection.SinWCS(*model.fieldCenter());
- return model;
-
-
-Tigger.Models.Formats.registerFormat("AIPSCCFITS",load,"AIPS CC FITS model",(".fits",".FITS",".fts",".FTS"));
+
+def load(filename, center=None, **kw):
+ """Imports an AIPS clean component list from FITS table
+ """
+ srclist = []
+ dprint(1, "importing AIPS clean component FITS table", filename)
+ # read file
+ ff = pyfits.open(filename)
+
+ if center is None:
+ hdr = ff[0].header
+ ra = hdr['CRVAL1'] * _units[hdr.get('CUNIT1', 'DEG').strip()]
+ dec = hdr['CRVAL2'] * _units[hdr.get('CUNIT2', 'DEG').strip()]
+
+ print "Using FITS image centre (%.4f, %.4f deg) as field centre" % (ra / DEG, dec / DEG)
+ center = ra, dec
+
+ # now process file line-by-line
+ cclist = ff[1].data
+ hdr = ff[1].header
+ ux = _units[hdr.get('TUNIT2', 'DEG').strip()]
+ uy = _units[hdr.get('TUNIT3', 'DEG').strip()]
+ for num, ccrec in enumerate(cclist):
+ stokes_i, dx, dy = map(float, ccrec)
+ # convert dx/dy to real positions
+ l, m = sin(dx * ux), sin(dy * uy)
+ ra, dec = lm_to_radec(l, m, *center)
+ pos = ModelClasses.Position(ra, dec)
+ flux = ModelClasses.Flux(stokes_i)
+ # now create a source object
+ src = SkyModel.Source('cc%d' % num, pos, flux)
+ src.setAttribute('r', math.sqrt(l * l + m * m))
+ srclist.append(src)
+ dprintf(2, "imported %d sources from file %s\n", len(srclist), filename)
+ # create model
+ model = ModelClasses.SkyModel(*srclist)
+ # setup model center
+ model.setFieldCenter(*center)
+ # setup radial distances
+ projection = Coordinates.Projection.SinWCS(*model.fieldCenter())
+ return model
+
+
+Tigger.Models.Formats.registerFormat("AIPSCCFITS", load, "AIPS CC FITS model", (".fits", ".FITS", ".fts", ".FTS"))
diff --git a/Tigger/Models/Formats/ASCII.py b/Tigger/Models/Formats/ASCII.py
index db97e01..8694ef2 100644
--- a/Tigger/Models/Formats/ASCII.py
+++ b/Tigger/Models/Formats/ASCII.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,23 +24,23 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys,traceback,math,numpy,re
-
-import Kittens.utils
+import math
+import re
+import sys
+import traceback
+import Tigger.Models.Formats
+from Tigger import Coordinates
from Tigger.Models import ModelClasses
from Tigger.Models import SkyModel
-from Tigger import Coordinates
-import Tigger.Models.Formats
-from Tigger.Models.Formats import dprint,dprintf
-
+from Tigger.Models.Formats import dprint, dprintf
DefaultDMSFormat = dict(name=0,
- ra_h=1,ra_m=2,ra_s=3,dec_d=4,dec_m=5,dec_s=6,
- i=7,q=8,u=9,v=10,spi=11,rm=12,emaj_s=13,emin_s=14,pa_d=15,
- freq0=16,tags=slice(17,None));
+ ra_h=1, ra_m=2, ra_s=3, dec_d=4, dec_m=5, dec_s=6,
+ i=7, q=8, u=9, v=10, spi=11, rm=12, emaj_s=13, emin_s=14, pa_d=15,
+ freq0=16, tags=slice(17, None))
-DefaultDMSFormatString = "name ra_h ra_m ra_s dec_d dec_m dec_s i q u v spi rm emaj_s emin_s pa_d freq0 tags...";
+DefaultDMSFormatString = "name ra_h ra_m ra_s dec_d dec_m dec_s i q u v spi rm emaj_s emin_s pa_d freq0 tags..."
FormatHelp = """
ASCII files are treated as columns of whitespace-separated values. The order
@@ -84,431 +84,435 @@
tags...: absorb all remaining fields as source tags
:TYPE:ATTR custom attribute. Contents of field will be converted to Python TYPE
(bool, int, float, complex, str) and associated with custom source atribute "ATTR"
-""";
+"""
-DEG = math.pi/180;
+DEG = math.pi / 180
# dict of angulr units with their scale in radians
-ANGULAR_UNITS = dict(rad=1,d=DEG,m=DEG/60,s=DEG/3600,h=DEG*15)
+ANGULAR_UNITS = dict(rad=1, d=DEG, m=DEG / 60, s=DEG / 3600, h=DEG * 15)
# subsets of angular units for leading RA or Dec column
-ANGULAR_UNITS_RA = dict(rad=1,d=DEG,h=DEG*15)
-ANGULAR_UNITS_DEC = dict(rad=1,d=DEG)
+ANGULAR_UNITS_RA = dict(rad=1, d=DEG, h=DEG * 15)
+ANGULAR_UNITS_DEC = dict(rad=1, d=DEG)
+
-def load (filename,format=None,freq0=None,center_on_brightest=False,min_extent=0,verbose=0,**kw):
- """Imports an ASCII table
- The 'format' argument can be either a dict (such as the DefaultDMSFormat dict above), or a string such as DefaultDMSFormatString.
- (Other possible field names are "ra_d", "ra_rad", "dec_rad", "dec_sign".)
- If None is specified, DefaultDMSFormat is used.
- The 'freq0' argument supplies a default reference frequency (if one is not contained in the file.)
- If 'center_on_brightest' is True, the mpodel field center will be set to the brightest source.
- 'min_extent' is minimal source extent (in radians), above which a source will be treated as a Gaussian rather than a point component.
- """
- srclist = [];
- dprint(1,"importing ASCII DMS file",filename);
- # brightest source and its coordinates
- maxbright = 0;
- brightest_name = radec0 = None;
+def load(filename, format=None, freq0=None, center_on_brightest=False, min_extent=0, verbose=0, **kw):
+ """Imports an ASCII table
+ The 'format' argument can be either a dict (such as the DefaultDMSFormat dict above), or a string such as DefaultDMSFormatString.
+ (Other possible field names are "ra_d", "ra_rad", "dec_rad", "dec_sign".)
+ If None is specified, DefaultDMSFormat is used.
+ The 'freq0' argument supplies a default reference frequency (if one is not contained in the file.)
+ If 'center_on_brightest' is True, the model field center will be set to the brightest source.
+ 'min_extent' is minimal source extent (in radians), above which a source will be treated as a Gaussian rather than a point component.
+ """
+ srclist = []
+ dprint(1, "importing ASCII DMS file", filename)
+ # brightest source and its coordinates
+ maxbright = 0
+ brightest_name = radec0 = None
- # Get column number associated with field from format dict, as well as the error
- # column number. Returns tuple of indices, with None index indicating no such column
- def get_field (name):
- return format.get(name,None),format.get(name+"_err",None);
- # Get column number associated with field from format dict, as well as the error
- # column number. Field is an angle thus will be suffixed with _{rad,d,h,m,s}.
- # Returns tuple of
- # column,scale,err_column,err_scale
- # with None index indicating no such column. Scale is scaling factor to convert
- # quantity in column to radians
- def get_ang_field (name,units=ANGULAR_UNITS):
- column = err_column = colunit = errunit = None
- units = units or ANGULAR_UNITS;
- for unit,scale in units.iteritems():
- if column is None:
- column = format.get("%s_%s"%(name,unit));
- if column is not None:
- colunit = scale;
- if err_column is None:
- err_column = format.get("%s_err_%s"%(name,unit))
- if err_column is not None:
- errunit = scale;
- return column,colunit,err_column,errunit;
+ # Get column number associated with field from format dict, as well as the error
+ # column number. Returns tuple of indices, with None index indicating no such column
+ def get_field(name):
+ return format.get(name, None), format.get(name + "_err", None)
- # helper function: returns element #num from the fields list, multiplied by scale, or None if no such field
- def getval (num,scale=1):
- return None if ( num is None or len(fields) <= num ) else float(fields[num])*scale;
+ # Get column number associated with field from format dict, as well as the error
+ # column number. Field is an angle thus will be suffixed with _{rad,d,h,m,s}.
+ # Returns tuple of
+ # column,scale,err_column,err_scale
+ # with None index indicating no such column. Scale is scaling factor to convert
+ # quantity in column to radians
+ def get_ang_field(name, units=ANGULAR_UNITS):
+ column = err_column = colunit = errunit = None
+ units = units or ANGULAR_UNITS
+ for unit, scale in units.iteritems():
+ if column is None:
+ column = format.get("%s_%s" % (name, unit))
+ if column is not None:
+ colunit = scale
+ if err_column is None:
+ err_column = format.get("%s_err_%s" % (name, unit))
+ if err_column is not None:
+ errunit = scale
+ return column, colunit, err_column, errunit
- # now process file line-by-line
- linenum = 0;
- format_str = ''
- for line in file(filename):
- # for the first line, figure out the file format
- if not linenum:
- if not format and line.startswith("#format:"):
- format = line[len("#format:"):].strip();
- dprint(1,"file contains format header:",format);
- # set default format
- if format is None:
- format = DefaultDMSFormatString;
- # is the format a string rather than a dict? Turn it into a dict then
- if isinstance(format,str):
- format_str = format;
- # make list of fieldname,fieldnumber tuples
- fields = [ (field,i) for i,field in enumerate(format.split()) ];
- if not fields:
- raise ValueError,"illegal format string in file: '%s'"%format;
- # last fieldname can end with ... to indicate that it absorbs the rest of the line
- if fields[-1][0].endswith('...'):
- fields[-1] = (fields[-1][0][:-3],slice(fields[-1][1],None));
- # make format dict
- format = dict(fields);
- elif not isinstance(format,dict):
- raise TypeError,"invalid 'format' argument of type %s"%(type(format))
- # nf = max(format.itervalues())+1;
- # fields = ['---']*nf;
- # for field,number in format.iteritems():
- # fields[number] = field;
- # format_str = " ".join(fields);
- # get list of custom attributes from format
- custom_attrs = [];
- for name,col in format.iteritems():
- if name.startswith(":"):
- m = re.match("^:(bool|int|float|complex|str):([\w]+)$",name);
- if not m:
- raise TypeError,"invalid field specification '%s' in format string"%name;
- custom_attrs.append((eval(m.group(1)),m.group(2),col));
- # get minimum necessary fields from format
- name_field = format.get('name',None);
- # flux
- i_field,i_err_field = get_field("i");
- if i_field is None:
- raise ValueError,"ASCII format specification lacks mandatory flux field ('i')";
- # main RA field
- ra_field,ra_scale,ra_err_field,ra_err_scale = get_ang_field('ra',ANGULAR_UNITS_RA);
- if ra_field is None:
- raise ValueError,"ASCII format specification lacks mandatory Right Ascension field ('ra_h', 'ra_d' or 'ra_rad')";
- # main Dec field
- dec_field,dec_scale,dec_err_field,dec_err_scale = get_ang_field('dec',ANGULAR_UNITS_DEC);
- if dec_field is None:
- raise ValueError,"ASCII format specification lacks mandatory Declination field ('dec_d' or 'dec_rad')";
- # polarization as QUV
- quv_fields = [ get_field(x) for x in ['q','u','v'] ];
- # linear polarization as fraction and angle
- polfrac_field = format.get('pol_frac',None);
- if polfrac_field is not None:
- polpa_field,polpa_scale = format.get('pol_pa_d',None),(math.pi/180);
- if not polpa_field is not None:
- polpa_field,polpa_scale = format.get('pol_pa_rad',None),1;
- # fields for extent parameters
- extent_fields = [ get_ang_field(x,ANGULAR_UNITS) for x in 'emaj','emin','pa' ];
- # all three must be present, else ignore
- if any( [ x[0] is None for x in extent_fields ] ):
- extent_fields = None;
- # fields for reference freq and RM and SpI
- freq0_field = format.get('freq0',None);
- rm_field,rm_err_field = get_field('rm');
- spi_fields = [ get_field('spi') ] + [ get_field('spi%d'%i) for i in range(2,10) ];
- tags_slice = format.get('tags',None);
- # now go on to process the line
- linenum += 1;
- try:
- # strip whitespace
- line = line.strip();
- dprintf(4,"%s:%d: read line '%s'\n",filename,linenum,line);
- # skip empty or commented lines
- if not line or line[0] == '#':
- continue;
- # split (at whitespace) into fields
- fields = line.split();
- # get name
- name = fields[name_field] if name_field is not None else str(len(srclist)+1);
- i = getval(i_field);
- i_err = getval(i_err_field);
- # get position: RA
- ra = getval(ra_field);
- ra_err = getval(ra_err_field,ra_scale);
- if 'ra_m' in format:
- ra += float(fields[format['ra_m']])/60.;
- if 'ra_s' in format:
- ra += float(fields[format['ra_s']])/3600.;
- ra *= ra_scale;
- # position: Dec. Separate treatment of sign
- dec = abs(getval(dec_field));
- dec_err = getval(dec_err_field,dec_scale);
- if 'dec_m' in format:
- dec += float(fields[format['dec_m']])/60.;
- if 'dec_s' in format:
- dec += float(fields[format['dec_s']])/3600.;
- if fields[format.get('dec_sign',dec_field)][0] == '-':
- dec = -dec;
- dec *= dec_scale;
- # for up position object
- pos = ModelClasses.Position(ra,dec,ra_err=ra_err,dec_err=dec_err);
- # see if we have freq0
+ # helper function: returns element #num from the fields list, multiplied by scale, or None if no such field
+ def getval(num, scale=1):
+ return None if (num is None or len(fields) <= num) else float(fields[num]) * scale
- # Use explicitly provided reference frequency for this source if available
- f0 = None
- if freq0_field is not None:
+ # now process file line-by-line
+ linenum = 0
+ format_str = ''
+ for line in file(filename):
+ # for the first line, figure out the file format
+ if not linenum:
+ if not format and line.startswith("#format:"):
+ format = line[len("#format:"):].strip()
+ dprint(1, "file contains format header:", format)
+ # set default format
+ if format is None:
+ format = DefaultDMSFormatString
+ # is the format a string rather than a dict? Turn it into a dict then
+ if isinstance(format, str):
+ format_str = format
+ # make list of fieldname,fieldnumber tuples
+ fields = [(field, i) for i, field in enumerate(format.split())]
+ if not fields:
+ raise ValueError, "illegal format string in file: '%s'" % format
+ # last fieldname can end with ... to indicate that it absorbs the rest of the line
+ if fields[-1][0].endswith('...'):
+ fields[-1] = (fields[-1][0][:-3], slice(fields[-1][1], None))
+ # make format dict
+ format = dict(fields)
+ elif not isinstance(format, dict):
+ raise TypeError, "invalid 'format' argument of type %s" % (type(format))
+ # nf = max(format.itervalues())+1
+ # fields = ['---']*nf
+ # for field,number in format.iteritems():
+ # fields[number] = field
+ # format_str = " ".join(fields)
+ # get list of custom attributes from format
+ custom_attrs = []
+ for name, col in format.iteritems():
+ if name.startswith(":"):
+ m = re.match("^:(bool|int|float|complex|str):([\w]+)$", name)
+ if not m:
+ raise TypeError, "invalid field specification '%s' in format string" % name
+ custom_attrs.append((eval(m.group(1)), m.group(2), col))
+ # get minimum necessary fields from format
+ name_field = format.get('name', None)
+ # flux
+ i_field, i_err_field = get_field("i")
+ if i_field is None:
+ raise ValueError, "ASCII format specification lacks mandatory flux field ('i')"
+ # main RA field
+ ra_field, ra_scale, ra_err_field, ra_err_scale = get_ang_field('ra', ANGULAR_UNITS_RA)
+ if ra_field is None:
+ raise ValueError, "ASCII format specification lacks mandatory Right Ascension field ('ra_h', 'ra_d' or 'ra_rad')"
+ # main Dec field
+ dec_field, dec_scale, dec_err_field, dec_err_scale = get_ang_field('dec', ANGULAR_UNITS_DEC)
+ if dec_field is None:
+ raise ValueError, "ASCII format specification lacks mandatory Declination field ('dec_d' or 'dec_rad')"
+ # polarization as QUV
+ quv_fields = [get_field(x) for x in ['q', 'u', 'v']]
+ # linear polarization as fraction and angle
+ polfrac_field = format.get('pol_frac', None)
+ if polfrac_field is not None:
+ polpa_field, polpa_scale = format.get('pol_pa_d', None), (math.pi / 180)
+                if polpa_field is None:
+ polpa_field, polpa_scale = format.get('pol_pa_rad', None), 1
+ # fields for extent parameters
+ extent_fields = [get_ang_field(x, ANGULAR_UNITS) for x in 'emaj', 'emin', 'pa']
+ # all three must be present, else ignore
+ if any([x[0] is None for x in extent_fields]):
+ extent_fields = None
+ # fields for reference freq and RM and SpI
+ freq0_field = format.get('freq0', None)
+ rm_field, rm_err_field = get_field('rm')
+ spi_fields = [get_field('spi')] + [get_field('spi%d' % i) for i in range(2, 10)]
+ tags_slice = format.get('tags', None)
+ # now go on to process the line
+ linenum += 1
try:
- f0 = float(fields[freq0_field])
- # If no default reference frequency for the model was supplied,
- # initialise from first source with a reference frequency
- if freq0 is None:
- freq0 = f0
- dprint(0,"Set default freq0 to %s "
- "from source on line %s." % (f0, linenum));
+ # strip whitespace
+ line = line.strip()
+ dprintf(4, "%s:%d: read line '%s'\n", filename, linenum, line)
+ # skip empty or commented lines
+ if not line or line[0] == '#':
+ continue
+ # split (at whitespace) into fields
+ fields = line.split()
+ # get name
+ name = fields[name_field] if name_field is not None else str(len(srclist) + 1)
+ i = getval(i_field)
+ i_err = getval(i_err_field)
+ # get position: RA
+ ra = getval(ra_field)
+ ra_err = getval(ra_err_field, ra_scale)
+ if 'ra_m' in format:
+ ra += float(fields[format['ra_m']]) / 60.
+ if 'ra_s' in format:
+ ra += float(fields[format['ra_s']]) / 3600.
+ ra *= ra_scale
+ # position: Dec. Separate treatment of sign
+ dec = abs(getval(dec_field))
+ dec_err = getval(dec_err_field, dec_scale)
+ if 'dec_m' in format:
+ dec += float(fields[format['dec_m']]) / 60.
+ if 'dec_s' in format:
+ dec += float(fields[format['dec_s']]) / 3600.
+ if fields[format.get('dec_sign', dec_field)][0] == '-':
+ dec = -dec
+ dec *= dec_scale
+            # form up position object
+ pos = ModelClasses.Position(ra, dec, ra_err=ra_err, dec_err=dec_err)
+ # see if we have freq0
- except IndexError:
- f0 = None
+ # Use explicitly provided reference frequency for this source if available
+ f0 = None
+ if freq0_field is not None:
+ try:
+ f0 = float(fields[freq0_field])
+ # If no default reference frequency for the model was supplied,
+ # initialise from first source with a reference frequency
+ if freq0 is None:
+ freq0 = f0
+ dprint(0, "Set default freq0 to %s "
+ "from source on line %s." % (f0, linenum))
- # Otherwise use default reference frequency (derived from args
- # or first reference frequency found in source)
- if f0 is None and freq0 is not None:
- f0 = freq0
+ except IndexError:
+ f0 = None
- # see if we have Q/U/V
- (q,q_err),(u,u_err),(v,v_err) = [ (getval(x),getval(x_err)) for x,x_err in quv_fields ];
- if polfrac_field is not None:
- pf = fields[polfrac_field];
- pf = float(pf[:-1])/100 if pf.endswith("%") else float(pf);
- ppa = float(fields[polpa_field])*polpa_scale if polpa_field is not None else 0;
- q = i*pf*math.cos(2*ppa);
- u = i*pf*math.sin(2*ppa);
- v = 0;
- # see if we have RM as well. Create flux object (unpolarized, polarized, polarized w/RM)
- rm,rm_err = getval(rm_field),getval(rm_err_field);
- if q is None:
- flux = ModelClasses.Polarization(i,0,0,0,I_err=i_err);
- elif f0 is None or rm is None:
- flux = ModelClasses.Polarization(i,q,u,v,I_err=i_err,Q_err=q_err,U_err=u_err,V_err=v_err);
- else:
- flux = ModelClasses.PolarizationWithRM(i,q,u,v,rm,f0,I_err=i_err,Q_err=q_err,U_err=u_err,V_err=v_err,rm_err=rm_err);
- # see if we have a spectral index
- if f0 is None:
- spectrum = None;
- else:
- spi = [ getval(x) for x,xerr in spi_fields ];
- spi_err = [ getval(xerr) for x,xerr in spi_fields ];
- dprint(4,name,"spi is",spi,"err is",spi_err)
- # if any higher-order spectral terms are specified, include them here but trim off all trailing zeroes
- while spi and not spi[-1]:
- del spi[-1];
- del spi_err[-1]
- if not spi:
- spectrum = None;
- elif len(spi) == 1:
- spectrum = ModelClasses.SpectralIndex(spi[0],f0);
- if spi_err[0] is not None:
- spectrum.spi_err = spi_err[0];
- else:
- spectrum = ModelClasses.SpectralIndex(spi,f0);
- if any([ x is not None for x in spi_err ]):
- spectrum.spi_err = spi_err;
- # see if we have extent parameters
- ex = ey = pa = 0;
- if extent_fields:
- ex,ey,pa = [ ( getval(x[0],x[1]) or 0 ) for x in extent_fields ];
- extent_errors = [ getval(x[2],x[3]) for x in extent_fields ];
- # form up shape object
- if (ex or ey) and max(ex,ey) >= min_extent:
- shape = ModelClasses.Gaussian(ex,ey,pa);
- for ifield,field in enumerate(['ex','ey','pa']):
- if extent_errors[ifield] is not None:
- shape.setAttribute(field+"_err",extent_errors[ifield]);
- else:
- shape = None;
- # get tags
- tagdict = {};
- if tags_slice:
- try:
- tags = fields[tags_slice];
- except IndexError:
- pass;
- for tagstr1 in tags:
- for tagstr in tagstr1.split(","):
- if tagstr[0] == "+":
- tagname,value = tagstr[1:],True;
- elif tagstr[0] == "-":
- tagname,value = tagstr[1:],False;
- elif "=" in tagstr:
- tagname,value = tagstr.split("=",1);
- if value[0] in "'\"" and value[-1] in "'\"":
- value = value[1:-1];
- else:
- try:
- value = float(value);
- except:
- continue;
+ # Otherwise use default reference frequency (derived from args
+ # or first reference frequency found in source)
+ if f0 is None and freq0 is not None:
+ f0 = freq0
+
+ # see if we have Q/U/V
+ (q, q_err), (u, u_err), (v, v_err) = [(getval(x), getval(x_err)) for x, x_err in quv_fields]
+ if polfrac_field is not None:
+ pf = fields[polfrac_field]
+ pf = float(pf[:-1]) / 100 if pf.endswith("%") else float(pf)
+ ppa = float(fields[polpa_field]) * polpa_scale if polpa_field is not None else 0
+ q = i * pf * math.cos(2 * ppa)
+ u = i * pf * math.sin(2 * ppa)
+ v = 0
+ # see if we have RM as well. Create flux object (unpolarized, polarized, polarized w/RM)
+ rm, rm_err = getval(rm_field), getval(rm_err_field)
+ if q is None:
+ flux = ModelClasses.Polarization(i, 0, 0, 0, I_err=i_err)
+ elif f0 is None or rm is None:
+ flux = ModelClasses.Polarization(i, q, u, v, I_err=i_err, Q_err=q_err, U_err=u_err, V_err=v_err)
else:
- tagname,value = tagstr,True;
- tagdict[tagname] = value;
- # OK, now form up the source object
- # now create a source object
- dprint(3,name,ra,dec,i,q,u,v);
- src = SkyModel.Source(name,pos,flux,shape=shape,spectrum=spectrum,**tagdict);
- # get custom attributes
- for type_,attr,column in custom_attrs:
- if column is not None and len(fields) > column:
- src.setAttribute(attr,type_(fields[column]));
- # add to source list
- srclist.append(src);
- # check if it's the brightest
- brightness = src.brightness();
- if brightness > maxbright:
- maxbright = brightness;
- brightest_name = src.name;
- radec0 = ra,dec;
- except:
- if verbose:
- traceback.print_exc();
- dprintf(0,"%s:%d: %s, skipping\n",filename,linenum,str(sys.exc_info()[1]));
- dprintf(2,"imported %d sources from file %s\n",len(srclist),filename);
- # create model
- model = ModelClasses.SkyModel(*srclist);
- if freq0 is not None:
- model.setRefFreq(freq0);
- # set model format
- model.setAttribute("ASCII_Format",format_str);
- # setup model center
- if center_on_brightest and radec0:
- dprintf(2,"brightest source is %s (%g Jy) at %f,%f\n",brightest_name,maxbright,*radec0);
- model.setFieldCenter(*radec0);
- # setup radial distances
- projection = Coordinates.Projection.SinWCS(*model.fieldCenter());
- for src in model.sources:
- l,m = projection.lm(src.pos.ra,src.pos.dec);
- src.setAttribute('r',math.sqrt(l*l+m*m));
- return model;
+ flux = ModelClasses.PolarizationWithRM(i, q, u, v, rm, f0, I_err=i_err, Q_err=q_err, U_err=u_err,
+ V_err=v_err, rm_err=rm_err)
+ # see if we have a spectral index
+ if f0 is None:
+ spectrum = None
+ else:
+ spi = [getval(x) for x, xerr in spi_fields]
+ spi_err = [getval(xerr) for x, xerr in spi_fields]
+ dprint(4, name, "spi is", spi, "err is", spi_err)
+ # if any higher-order spectral terms are specified, include them here but trim off all trailing zeroes
+ while spi and not spi[-1]:
+ del spi[-1]
+ del spi_err[-1]
+ if not spi:
+ spectrum = None
+ elif len(spi) == 1:
+ spectrum = ModelClasses.SpectralIndex(spi[0], f0)
+ if spi_err[0] is not None:
+ spectrum.spi_err = spi_err[0]
+ else:
+ spectrum = ModelClasses.SpectralIndex(spi, f0)
+ if any([x is not None for x in spi_err]):
+ spectrum.spi_err = spi_err
+ # see if we have extent parameters
+ ex = ey = pa = 0
+ if extent_fields:
+ ex, ey, pa = [(getval(x[0], x[1]) or 0) for x in extent_fields]
+ extent_errors = [getval(x[2], x[3]) for x in extent_fields]
+ # form up shape object
+ if (ex or ey) and max(ex, ey) >= min_extent:
+ shape = ModelClasses.Gaussian(ex, ey, pa)
+ for ifield, field in enumerate(['ex', 'ey', 'pa']):
+ if extent_errors[ifield] is not None:
+ shape.setAttribute(field + "_err", extent_errors[ifield])
+ else:
+ shape = None
+ # get tags
+ tagdict = {}
+ if tags_slice:
+ try:
+ tags = fields[tags_slice]
+ except IndexError:
+ pass
+ for tagstr1 in tags:
+ for tagstr in tagstr1.split(","):
+ if tagstr[0] == "+":
+ tagname, value = tagstr[1:], True
+ elif tagstr[0] == "-":
+ tagname, value = tagstr[1:], False
+ elif "=" in tagstr:
+ tagname, value = tagstr.split("=", 1)
+ if value[0] in "'\"" and value[-1] in "'\"":
+ value = value[1:-1]
+ else:
+ try:
+ value = float(value)
+ except:
+ continue
+ else:
+ tagname, value = tagstr, True
+ tagdict[tagname] = value
+ # OK, now form up the source object
+ # now create a source object
+ dprint(3, name, ra, dec, i, q, u, v)
+ src = SkyModel.Source(name, pos, flux, shape=shape, spectrum=spectrum, **tagdict)
+ # get custom attributes
+ for type_, attr, column in custom_attrs:
+ if column is not None and len(fields) > column:
+ src.setAttribute(attr, type_(fields[column]))
+ # add to source list
+ srclist.append(src)
+ # check if it's the brightest
+ brightness = src.brightness()
+ if brightness > maxbright:
+ maxbright = brightness
+ brightest_name = src.name
+ radec0 = ra, dec
+ except:
+ if verbose:
+ traceback.print_exc()
+ dprintf(0, "%s:%d: %s, skipping\n", filename, linenum, str(sys.exc_info()[1]))
+ dprintf(2, "imported %d sources from file %s\n", len(srclist), filename)
+ # create model
+ model = ModelClasses.SkyModel(*srclist)
+ if freq0 is not None:
+ model.setRefFreq(freq0)
+ # set model format
+ model.setAttribute("ASCII_Format", format_str)
+ # setup model center
+ if center_on_brightest and radec0:
+ dprintf(2, "brightest source is %s (%g Jy) at %f,%f\n", brightest_name, maxbright, *radec0)
+ model.setFieldCenter(*radec0)
+ # setup radial distances
+ projection = Coordinates.Projection.SinWCS(*model.fieldCenter())
+ for src in model.sources:
+ l, m = projection.lm(src.pos.ra, src.pos.dec)
+ src.setAttribute('r', math.sqrt(l * l + m * m))
+ return model
-def save (model,filename,sources=None,format=None,**kw):
- """
- Exports model to a text file
- """;
- if sources is None:
- sources = model.sources;
- dprintf(2,"writing %d model sources to text file %s\n",len(sources),filename);
- # create catalog parser based on either specified format, or the model format, or the default format
- format_str = format or getattr(model,'ASCII_Format',DefaultDMSFormatString);
- dprint(2,"format string is",format_str);
- # convert this into format dict
- fields = [ [field,i] for i,field in enumerate(format_str.split()) ];
- if not fields:
- raise ValueError,"illegal format string '%s'"%format;
- # last fieldname can end with ... ("tags..."), so strip it
- if fields[-1][0].endswith('...'):
- fields[-1][0] = fields[-1][0][:-3];
- # make format dict
- format = dict(fields);
- nfields = len(fields);
- # get minimum necessary fields from format
- name_field = format.get('name',None);
- # main RA field
- ra_rad_field,ra_d_field,ra_h_field,ra_m_field,ra_s_field = \
- [ format.get(x,None) for x in 'ra_rad','ra_d','ra_h','ra_m','ra_s' ];
- dec_rad_field,dec_d_field,dec_m_field,dec_s_field = \
- [ format.get(x,None) for x in 'dec_rad','dec_d','dec_m','dec_s' ];
- if ra_h_field is not None:
- ra_scale = 15;
- ra_d_field = ra_h_field;
- else:
- ra_scale = 1;
- # fields for reference freq and RM and SpI
- freq0_field = format.get('freq0',None);
- rm_field = format.get('rm',None);
- spi_field = format.get('spi',None);
- tags_field = format.get('tags',None);
- # open file
- ff = open(filename,mode="wt");
- ff.write("#format: %s\n"%format_str);
- # write sources
- nsrc = 0;
- for src in sources:
- # only write points and gaussians
- if src.shape is not None and not isinstance(src.shape,ModelClasses.Gaussian):
- dprint(3,"skipping source '%s': non-supported type '%s'"%(src.name,src.shape.typecode));
- continue;
- # prepare field values
- fval = ['0']*nfields;
- # name
- if name_field is not None:
- fval[name_field] = src.name;
- # position: RA
- ra,dec = src.pos.ra,src.pos.dec;
- # RA in radians
- if ra_rad_field is not None:
- fval[ra_rad_field] = str(ra);
- ra /= ra_scale;
- # RA in h/m/s or d/m/s
- if ra_m_field is not None:
- ra,ram,ras = src.pos.ra_hms_static(ra,scale=180,prec=1e-4);
- fval[ra_m_field] = str(ram);
- if ra_s_field is not None:
- fval[ra_s_field] = str(ras);
- if ra_d_field is not None:
- fval[ra_d_field] = str(ra);
- elif ra_d_field is not None:
- fval[ra_d_field] = str(ra*180/math.pi);
- # position: Dec
- if dec_rad_field is not None:
- fval[dec_rad_field] = str(dec);
- if dec_m_field is not None:
- dsign,decd,decm,decs = src.pos.dec_sdms();
- fval[dec_m_field] = str(decm);
- if dec_s_field is not None:
- fval[dec_s_field] = str(decs);
- if dec_d_field is not None:
- fval[dec_d_field] = dsign+str(decd);
- elif dec_d_field is not None:
- fval[dec_d_field] = str(dec*180/math.pi);
- # fluxes
- for stokes in "IQUV":
- field = format.get(stokes.lower());
- if field is not None:
- fval[field] = str(getattr(src.flux,stokes,0));
- # fractional polarization
- if 'pol_frac' in format:
- i,q,u = [ getattr(src.flux,stokes,0) for stokes in "IQU" ];
- fval[format['pol_frac']] = str(math.sqrt(q*q+u*u)/i);
- pa = math.atan2(u,q)/2;
- for field,scale in ('pol_pa_rad',1.),('pol_pa_d',DEG):
- ifield = format.get(field);
- if ifield is not None:
- fval[ifield] = str(pa/scale);
- # shape
- if src.shape:
- for parm,sparm in ("emaj","ex"),("emin","ey"),("pa","pa"):
- for field,scale in (parm,1.),(parm+'_rad',DEG),(parm+'_d',DEG),(parm+'_m',DEG/60),(parm+'_s',DEG/3600):
- ifield = format.get(field.lower());
- if ifield is not None:
- fval[ifield] = str(getattr(src.shape,sparm,0)/scale);
- # RM, spi, freq0
- if freq0_field is not None:
- freq0 = (src.spectrum and getattr(src.spectrum,'freq0',None)) or getattr(src.flux,'freq0',0);
- fval[freq0_field] = str(freq0);
- if rm_field is not None:
- fval[rm_field] = str(getattr(src.flux,'rm',0));
- if spi_field is not None and hasattr(src,'spectrum'):
- fval[spi_field] = str(getattr(src.spectrum,'spi',0));
- # tags
- if tags_field is not None:
- outtags = [];
- for tag,value in src.getTags():
- if isinstance(value,str):
- outtags.append("%s=\"%s\""%(tag,value));
- elif isinstance(value,bool):
- if value:
- outtags.append("+"+tag);
- else:
- outtags.append("-"+tag);
- elif isinstance(value,(int,float)):
- outtags.append("%s=%f"%(tag,value));
- fval[tags_field] = ",".join(outtags);
- # write the line
- ff.write(" ".join(fval)+"\n");
- nsrc += 1;
+def save(model, filename, sources=None, format=None, **kw):
+ """
+ Exports model to a text file
+ """
+ if sources is None:
+ sources = model.sources
+ dprintf(2, "writing %d model sources to text file %s\n", len(sources), filename)
+ # create catalog parser based on either specified format, or the model format, or the default format
+ format_str = format or getattr(model, 'ASCII_Format', DefaultDMSFormatString)
+ dprint(2, "format string is", format_str)
+ # convert this into format dict
+ fields = [[field, i] for i, field in enumerate(format_str.split())]
+ if not fields:
+ raise ValueError, "illegal format string '%s'" % format
+ # last fieldname can end with ... ("tags..."), so strip it
+ if fields[-1][0].endswith('...'):
+ fields[-1][0] = fields[-1][0][:-3]
+ # make format dict
+ format = dict(fields)
+ nfields = len(fields)
+ # get minimum necessary fields from format
+ name_field = format.get('name', None)
+ # main RA field
+ ra_rad_field, ra_d_field, ra_h_field, ra_m_field, ra_s_field = \
+ [format.get(x, None) for x in 'ra_rad', 'ra_d', 'ra_h', 'ra_m', 'ra_s']
+ dec_rad_field, dec_d_field, dec_m_field, dec_s_field = \
+ [format.get(x, None) for x in 'dec_rad', 'dec_d', 'dec_m', 'dec_s']
+ if ra_h_field is not None:
+ ra_scale = 15
+ ra_d_field = ra_h_field
+ else:
+ ra_scale = 1
+ # fields for reference freq and RM and SpI
+ freq0_field = format.get('freq0', None)
+ rm_field = format.get('rm', None)
+ spi_field = format.get('spi', None)
+ tags_field = format.get('tags', None)
+ # open file
+ ff = open(filename, mode="wt")
+ ff.write("#format: %s\n" % format_str)
+ # write sources
+ nsrc = 0
+ for src in sources:
+ # only write points and gaussians
+ if src.shape is not None and not isinstance(src.shape, ModelClasses.Gaussian):
+ dprint(3, "skipping source '%s': non-supported type '%s'" % (src.name, src.shape.typecode))
+ continue
+ # prepare field values
+ fval = ['0'] * nfields
+ # name
+ if name_field is not None:
+ fval[name_field] = src.name
+ # position: RA
+ ra, dec = src.pos.ra, src.pos.dec
+ # RA in radians
+ if ra_rad_field is not None:
+ fval[ra_rad_field] = str(ra)
+ ra /= ra_scale
+ # RA in h/m/s or d/m/s
+ if ra_m_field is not None:
+ ra, ram, ras = src.pos.ra_hms_static(ra, scale=180, prec=1e-4)
+ fval[ra_m_field] = str(ram)
+ if ra_s_field is not None:
+ fval[ra_s_field] = str(ras)
+ if ra_d_field is not None:
+ fval[ra_d_field] = str(ra)
+ elif ra_d_field is not None:
+ fval[ra_d_field] = str(ra * 180 / math.pi)
+ # position: Dec
+ if dec_rad_field is not None:
+ fval[dec_rad_field] = str(dec)
+ if dec_m_field is not None:
+ dsign, decd, decm, decs = src.pos.dec_sdms()
+ fval[dec_m_field] = str(decm)
+ if dec_s_field is not None:
+ fval[dec_s_field] = str(decs)
+ if dec_d_field is not None:
+ fval[dec_d_field] = dsign + str(decd)
+ elif dec_d_field is not None:
+ fval[dec_d_field] = str(dec * 180 / math.pi)
+ # fluxes
+ for stokes in "IQUV":
+ field = format.get(stokes.lower())
+ if field is not None:
+ fval[field] = str(getattr(src.flux, stokes, 0))
+ # fractional polarization
+ if 'pol_frac' in format:
+ i, q, u = [getattr(src.flux, stokes, 0) for stokes in "IQU"]
+ fval[format['pol_frac']] = str(math.sqrt(q * q + u * u) / i)
+ pa = math.atan2(u, q) / 2
+ for field, scale in ('pol_pa_rad', 1.), ('pol_pa_d', DEG):
+ ifield = format.get(field)
+ if ifield is not None:
+ fval[ifield] = str(pa / scale)
+ # shape
+ if src.shape:
+ for parm, sparm in ("emaj", "ex"), ("emin", "ey"), ("pa", "pa"):
+ for field, scale in (parm, 1.), (parm + '_rad', DEG), (parm + '_d', DEG), (parm + '_m', DEG / 60), (
+ parm + '_s', DEG / 3600):
+ ifield = format.get(field.lower())
+ if ifield is not None:
+ fval[ifield] = str(getattr(src.shape, sparm, 0) / scale)
+ # RM, spi, freq0
+ if freq0_field is not None:
+ freq0 = (src.spectrum and getattr(src.spectrum, 'freq0', None)) or getattr(src.flux, 'freq0', 0)
+ fval[freq0_field] = str(freq0)
+ if rm_field is not None:
+ fval[rm_field] = str(getattr(src.flux, 'rm', 0))
+ if spi_field is not None and hasattr(src, 'spectrum'):
+ fval[spi_field] = str(getattr(src.spectrum, 'spi', 0))
+ # tags
+ if tags_field is not None:
+ outtags = []
+ for tag, value in src.getTags():
+ if isinstance(value, str):
+ outtags.append("%s=\"%s\"" % (tag, value))
+ elif isinstance(value, bool):
+ if value:
+ outtags.append("+" + tag)
+ else:
+ outtags.append("-" + tag)
+ elif isinstance(value, (int, float)):
+ outtags.append("%s=%f" % (tag, value))
+ fval[tags_field] = ",".join(outtags)
+ # write the line
+ ff.write(" ".join(fval) + "\n")
+ nsrc += 1
- ff.close();
- dprintf(1,"wrote %d sources to file %s\n",nsrc,filename);
+ ff.close()
+ dprintf(1, "wrote %d sources to file %s\n", nsrc, filename)
-Tigger.Models.Formats.registerFormat("ASCII",load,"ASCII table",(".txt",".lsm"),export_func=save);
+Tigger.Models.Formats.registerFormat("ASCII", load, "ASCII table", (".txt", ".lsm"), export_func=save)
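
For readers unfamiliar with the "#format:" convention handled above, here is a minimal sketch (not part of the patch) of how the header line maps column names to field indices; the format string, source name and values below are invented for illustration, and the logic simply mirrors the format-dict handling in load().

    # Sketch only: assumed format string and data line, not from any shipped model.
    format_str = "name ra_h ra_m ra_s dec_d dec_m dec_s i tags..."
    fields = [(field, i) for i, field in enumerate(format_str.split())]
    # a trailing "..." means the last field absorbs the rest of the line
    if fields[-1][0].endswith('...'):
        fields[-1] = (fields[-1][0][:-3], slice(fields[-1][1], None))
    format = dict(fields)
    line = "SRC1 05 42 36.1 49 51 07 2.5 +calibrator cluster=A"
    cols = line.split()
    print(cols[format['name']])      # SRC1
    print(float(cols[format['i']]))  # 2.5 (Stokes I column)
    print(cols[format['tags']])      # ['+calibrator', 'cluster=A']

The resulting dict is what getval() and the per-line parsing above index into, and save() writes the same format string back out as the "#format:" header.
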
diff --git a/Tigger/Models/Formats/BBS.py b/Tigger/Models/Formats/BBS.py
index 4431c8b..3e52a90 100644
--- a/Tigger/Models/Formats/BBS.py
+++ b/Tigger/Models/Formats/BBS.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,24 +24,15 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys
-import traceback
import math
-import struct
-import time
-import os.path
import re
-
-import numpy
-
-import Kittens.utils
+import sys
import Tigger.Models.Formats
+from Tigger import Coordinates
from Tigger.Models import ModelClasses
from Tigger.Models import SkyModel
-from Tigger import Coordinates
-from Tigger.Models.Formats import dprint,dprintf
-
+from Tigger.Models.Formats import dprint, dprintf
"""
The BBS sky model catalog file (*.cat, or *.catalog) is a human-readable text
@@ -52,353 +43,358 @@
http://www.lofar.org/operations/doku.php?id=engineering:software:tools:bbs#creating_a_catalog_file
"""
-class CatalogLine (object):
- """A CatalogLine turns one catalog file line into an object whose attributes correspond to the fields.
- """;
- def __init__ (self,parser,fields=None):
- """Creates a catalog line. If fields!=None, then this contains a list of fields already filled in""";
- self._parser = parser;
- self._fields = fields;
- if fields:
- # parse fields
- for field,number in parser.field_number.iteritems():
- fval = fields[number].strip() if number < len(fields) else '';
- if not fval:
- fval = parser.field_default.get(field,'');
- setattr(self,field,fval);
- # make directions
- self.ra_rad = parser.getAngle(self,'Ra','rah','rad','ram','ras');
- self.dec_rad = parser.getAngle(self,'Dec','dech','decd','decm','decs');
- else:
- # else make empty line
- for field in parser.field_number.iterkeys():
- setattr(self,field,'');
-
- def setPosition (self,ra,dec):
- """Sets the position ra/dec in radians: fills in fields according to the parser format""";
- self.ra_rad,self.dec_rad = ra,dec;
- self._parser.putAngle(self,ra,'Ra','rah','rad','ram','ras');
- self._parser.putAngle(self,dec,'Dec','dech','decd','decm','decs');
-
- def makeStr (self):
- """Converts into a string using the designated parser""";
- # build up dict of valid fields
- fields = {};
- for field,num in self._parser.field_number.iteritems():
- value = getattr(self,field,None);
- if value:
- fields[num] = value;
- # output
- output = "";
- nfields = max(fields.iterkeys())+1;
- for i in range(nfields):
-      sep = self._parser.separators[i] if i < nfields-1 else '';
-      output += "%s%s"%(fields.get(i,''),sep);
-    return output;
-
-class CatalogParser (object):
-  def __init__ (self,format):
-    # figure out fields and their separators
-    fields = [];
-    self.separators = [];
-    while True:
-      match = re.match("(\w[\w:]*(=(fixed)?'[^']*')?)(([^\w]+)(\w.*))?$",format);
-      if not match:
-        break;
-      fields.append(match.group(1));
-      # if no group 4, then we've reached the last field
-      if not match.group(4):
-        break;
-      self.separators.append(match.group(5));
-      format = match.group(6);
-    # now parse the format specification
-    # this is a dict of field name -> field index
- self.field_number = {};
- # this is a dict of field name -> default value
- self.field_default = dict(Category='2',I='1');
- # fill up the dicts
- for num_field,field in enumerate(fields):
- # is a default value given?
- match = re.match("(.+)='(.*)'$",field);
- if match:
- field = match.group(1);
- self.field_default[field] = match.group(2);
- self.field_number[field] = num_field;
- dprint(2,"fields are",self.field_number);
- dprint(2,"default values are",self.field_default);
- dprint(2,"separators are",self.separators);
-
- def defines (self,field):
- return field in self.field_number;
-
- def parse (self,line,linenum=0):
- """Parses one line. Returns None for empty or commented lines, else returns a CatalogLine object""";
- # strip whitespace
- line = line.strip();
- dprintf(3,"read line %d: %s\n",linenum,line);
- # skip empty or commented lines
- if not line or line[0] == '#':
- return None;
- # split using separators, quit when no more separators
- fields = [];
- for sep in self.separators:
- ff = line.split(sep,1);
- if len(ff) < 2:
- break;
- fields.append(ff[0]);
- line = ff[1];
- fields.append(line);
- dprint(4,"line %d: "%linenum,fields);
- return CatalogLine(self,fields);
-
- def newline (self):
- return CatalogLine(self);
- def getAngle (self,catline,field,fh,fd,fm,fs):
- """Helper function: given a CatalogLine, and a set of field indentifiers, turns this
- into an angle (in radians).""";
- scale = 1;
- if self.defines(field):
- fstr = getattr(catline,field,None);
- match = re.match('([+-]?\s*\d+)[h:](\d+)[m:]([\d.]*)s?$',fstr);
- if match:
- scale = 15;
- else:
- match = re.match('([+-]?\s*\d+).(\d+).(.*)$',fstr);
- if not match:
- raise ValueError,"invalid direction '%s'"%fstr;
- d,m,s = match.groups();
- else:
- if self.defines(fh):
- scale = 15;
- d = getattr(catline,fh);
- else:
- d = getattr(catline,fd,'0');
- m = getattr(catline,fm,'0');
- s = getattr(catline,fs,'0');
- # now, d,m,s are strings
- if d.startswith('-'):
- scale = -scale;
- d = d[1:];
- # convert to degrees
- return scale*(float(d) + float(m)/60 + float(s)/3600)*math.pi/180;
+class CatalogLine(object):
+ """A CatalogLine turns one catalog file line into an object whose attributes correspond to the fields.
+ """
+
+ def __init__(self, parser, fields=None):
+ """Creates a catalog line. If fields!=None, then this contains a list of fields already filled in"""
+ self._parser = parser
+ self._fields = fields
+ if fields:
+ # parse fields
+ for field, number in parser.field_number.iteritems():
+ fval = fields[number].strip() if number < len(fields) else ''
+ if not fval:
+ fval = parser.field_default.get(field, '')
+ setattr(self, field, fval)
+ # make directions
+ self.ra_rad = parser.getAngle(self, 'Ra', 'rah', 'rad', 'ram', 'ras')
+ self.dec_rad = parser.getAngle(self, 'Dec', 'dech', 'decd', 'decm', 'decs')
+ else:
+ # else make empty line
+ for field in parser.field_number.iterkeys():
+ setattr(self, field, '')
+
+ def setPosition(self, ra, dec):
+ """Sets the position ra/dec in radians: fills in fields according to the parser format"""
+ self.ra_rad, self.dec_rad = ra, dec
+ self._parser.putAngle(self, ra, 'Ra', 'rah', 'rad', 'ram', 'ras')
+ self._parser.putAngle(self, dec, 'Dec', 'dech', 'decd', 'decm', 'decs')
+
+ def makeStr(self):
+ """Converts into a string using the designated parser"""
+ # build up dict of valid fields
+ fields = {}
+ for field, num in self._parser.field_number.iteritems():
+ value = getattr(self, field, None)
+ if value:
+ fields[num] = value
+ # output
+ output = ""
+ nfields = max(fields.iterkeys()) + 1
+ for i in range(nfields):
+ sep = self._parser.separators[i] if i < nfields - 1 else ''
+ output += "%s%s" % (fields.get(i, ''), sep)
+ return output
+
+
+class CatalogParser(object):
+ def __init__(self, format):
+ # figure out fields and their separators
+ fields = []
+ self.separators = []
+ while True:
+ match = re.match("(\w[\w:]*(=(fixed)?'[^']*')?)(([^\w]+)(\w.*))?$", format)
+ if not match:
+ break
+ fields.append(match.group(1))
+ # if no group 4, then we've reached the last field
+ if not match.group(4):
+ break
+ self.separators.append(match.group(5))
+ format = match.group(6)
+ # now parse the format specification
+ # this is a dict of field name -> field index
+ self.field_number = {}
+ # this is a dict of field name -> default value
+ self.field_default = dict(Category='2', I='1')
+ # fill up the dicts
+ for num_field, field in enumerate(fields):
+ # is a default value given?
+ match = re.match("(.+)='(.*)'$", field)
+ if match:
+ field = match.group(1)
+ self.field_default[field] = match.group(2)
+ self.field_number[field] = num_field
+ dprint(2, "fields are", self.field_number)
+ dprint(2, "default values are", self.field_default)
+ dprint(2, "separators are", self.separators)
+
+ def defines(self, field):
+ return field in self.field_number
+
+ def parse(self, line, linenum=0):
+ """Parses one line. Returns None for empty or commented lines, else returns a CatalogLine object"""
+ # strip whitespace
+ line = line.strip()
+ dprintf(3, "read line %d: %s\n", linenum, line)
+ # skip empty or commented lines
+ if not line or line[0] == '#':
+ return None
+ # split using separators, quit when no more separators
+ fields = []
+ for sep in self.separators:
+ ff = line.split(sep, 1)
+ if len(ff) < 2:
+ break
+ fields.append(ff[0])
+ line = ff[1]
+ fields.append(line)
+ dprint(4, "line %d: " % linenum, fields)
+ return CatalogLine(self, fields)
+
+ def newline(self):
+ return CatalogLine(self)
+
+ def getAngle(self, catline, field, fh, fd, fm, fs):
+        """Helper function: given a CatalogLine and a set of field identifiers, turns this
+ into an angle (in radians)."""
+ scale = 1
+ if self.defines(field):
+ fstr = getattr(catline, field, None)
+ match = re.match('([+-]?\s*\d+)[h:](\d+)[m:]([\d.]*)s?$', fstr)
+ if match:
+ scale = 15
+ else:
+ match = re.match('([+-]?\s*\d+).(\d+).(.*)$', fstr)
+ if not match:
+ raise ValueError, "invalid direction '%s'" % fstr
+ d, m, s = match.groups()
+ else:
+ if self.defines(fh):
+ scale = 15
+ d = getattr(catline, fh)
+ else:
+ d = getattr(catline, fd, '0')
+ m = getattr(catline, fm, '0')
+ s = getattr(catline, fs, '0')
+ # now, d,m,s are strings
+ if d.startswith('-'):
+ scale = -scale
+ d = d[1:]
+ # convert to degrees
+ return scale * (float(d) + float(m) / 60 + float(s) / 3600) * math.pi / 180
+
+ def putAngle(self, catline, angle, field, fh, fd, fm, fs, prec=1e-6):
+ """Helper function: inverse of getAngle."""
+ # decompose angle into sign,d,m,s
+ if angle < 0:
+ sign = "-"
+ angle = -angle
+ else:
+ sign = "+" if field == "Dec" else ""
+ angle *= 12 / math.pi if not self.defines(field) and self.defines(fh) else 180 / math.pi
+ mins, secs = divmod(round(angle * 3600 / prec) * prec, 60)
+ mins = int(mins)
+ degs, mins = divmod(mins, 60)
+ # generate output
+ if self.defines(field):
+ setattr(catline, field, "%s%d.%d.%.4f" % (sign, degs, mins, secs))
+ else:
+ setattr(catline, fh if self.defines(fh) else fd, "%s%d" % (sign, degs))
+ setattr(catline, fm, "%d" % mins)
+ setattr(catline, fs, "%.4f" % secs)
+
+
+def load(filename, freq0=None, center_on_brightest=False, **kw):
+    """Imports a BBS catalog file
+ The 'format' argument can be either a dict (such as the DefaultDMSFormat dict above), or a string such as DefaultDMSFormatString.
+ (Other possible field names are "ra_d", "ra_rad", "dec_rad", "dec_sign".)
+ If None is specified, DefaultDMSFormat is used.
+ The 'freq0' argument supplies a default reference frequency (if one is not contained in the file.)
+    If 'center_on_brightest' is True, the model field center will be set to the brightest source,
+ else to the center of the first patch.
+ """
+ srclist = []
+ dprint(1, "importing BBS source table", filename)
+ # read file
+ ff = file(filename)
+ # first line must be a format string: extract it
+ line0 = ff.readline().strip()
+ match = re.match("#\s*\((.+)\)\s*=\s*format", line0)
+ if not match:
+ raise ValueError, "line 1 is not a valid format specification"
+ format_str = match.group(1)
+ # create format parser from this string
+ parser = CatalogParser(format_str)
- def putAngle (self,catline,angle,field,fh,fd,fm,fs,prec=1e-6):
- """Helper function: inverse of getAngle.""";
- # decompose angle into sign,d,m,s
- if angle < 0:
- sign = "-";
- angle = -angle;
- else:
- sign = "+" if field == "Dec" else "";
- angle *= 12/math.pi if not self.defines(field) and self.defines(fh) else 180/math.pi;
- mins,secs = divmod(round(angle*3600/prec)*prec,60);
- mins = int(mins);
- degs,mins = divmod(mins,60);
- #generate output
- if self.defines(field):
- setattr(catline,field,"%s%d.%d.%.4f"%(sign,degs,mins,secs));
- else:
- setattr(catline,fh if self.defines(fh) else fd,"%s%d"%(sign,degs));
- setattr(catline,fm,"%d"%mins);
- setattr(catline,fs,"%.4f"%secs);
+ # check for mandatory fields
+ for field in "Name", "Type":
+ if not parser.defines(field):
+ raise ValueError, "Table lacks mandatory field '%s'" % field
+ maxbright = 0
+ patches = []
+ ref_freq = freq0
-def load (filename,freq0=None,center_on_brightest=False,**kw):
- """Imports an BBS catalog file
- The 'format' argument can be either a dict (such as the DefaultDMSFormat dict above), or a string such as DefaultDMSFormatString.
- (Other possible field names are "ra_d", "ra_rad", "dec_rad", "dec_sign".)
- If None is specified, DefaultDMSFormat is used.
- The 'freq0' argument supplies a default reference frequency (if one is not contained in the file.)
- If 'center_on_brightest' is True, the mpodel field center will be set to the brightest source,
- else to the center of the first patch.
- """
- srclist = [];
- dprint(1,"importing BBS source table",filename);
- # read file
- ff = file(filename);
- # first line must be a format string: extract it
- line0 = ff.readline().strip();
- match = re.match("#\s*\((.+)\)\s*=\s*format",line0);
- if not match:
- raise ValueError,"line 1 is not a valid format specification";
- format_str = match.group(1);
- # create format parser from this string
- parser = CatalogParser(format_str);
-
- # check for mandatory fields
- for field in "Name","Type":
- if not parser.defines(field):
- raise ValueError,"Table lacks mandatory field '%s'"%field;
+ # now process file line-by-line
+ linenum = 1
+ for line in ff:
+ linenum += 1
+ try:
+ # parse one line
+ dprint(4, "read line:", line)
+ catline = parser.parse(line, linenum)
+ if not catline:
+ continue
+ dprint(5, "line %d: " % linenum, catline.__dict__)
+ # is it a patch record?
+ patchname = getattr(catline, 'Patch', '')
+ if not catline.Name:
+ dprintf(2, "%s:%d: patch %s\n", filename, linenum, patchname)
+ patches.append((patchname, catline.ra_rad, catline.dec_rad))
+ continue
+ # form up name
+ name = "%s:%s" % (patchname, catline.Name) if patchname else catline.Name
+ # check source type
+ stype = catline.Type.upper()
+ if stype not in ("POINT", "GAUSSIAN"):
+ raise ValueError, "unsupported source type %s" % stype
+ # see if we have freq0
+ if freq0:
+ f0 = freq0
+ elif hasattr(catline, 'ReferenceFrequency'):
+ f0 = float(catline.ReferenceFrequency or '0')
+ else:
+ f0 = None
+            # set model reference frequency
+ if f0 is not None and ref_freq is None:
+ ref_freq = f0
+ # see if we have Q/U/V
+ i, q, u, v = [float(getattr(catline, stokes, '0') or '0') for stokes in "IQUV"]
+ # see if we have RM as well. Create flux object (unpolarized, polarized, polarized w/RM)
+ if f0 is not None and hasattr(catline, 'RotationMeasure'):
+ flux = ModelClasses.PolarizationWithRM(i, q, u, v, float(catline.RotationMeasure or '0'), f0)
+ else:
+ flux = ModelClasses.Polarization(i, q, u, v)
+ # see if we have a spectral index
+ if f0 is not None and hasattr(catline, 'SpectralIndex:0'):
+ spectrum = ModelClasses.SpectralIndex(float(getattr(catline, 'SpectralIndex:0') or '0'), f0)
+ else:
+ spectrum = None
+ # see if we have extent parameters
+ if stype == "GAUSSIAN":
+ ex = float(getattr(catline, "MajorAxis", "0") or "0")
+ ey = float(getattr(catline, "MinorAxis", "0") or "0")
+ pa = float(getattr(catline, "Orientation", "0") or "0")
+ shape = ModelClasses.Gaussian(ex, ey, pa)
+ else:
+ shape = None
+ # create tags
+ tags = {}
+ for field in "Patch", "Category":
+ if hasattr(catline, field):
+ tags['BBS_%s' % field] = getattr(catline, field)
+ # OK, now form up the source object
+ # position
+ pos = ModelClasses.Position(catline.ra_rad, catline.dec_rad)
+ # now create a source object
+ src = SkyModel.Source(name, pos, flux, shape=shape, spectrum=spectrum, **tags)
+ srclist.append(src)
+ # check if it's the brightest
+ brightness = src.brightness()
+ if brightness > maxbright:
+ maxbright = brightness
+ brightest_name = src.name
+ radec0 = catline.ra_rad, catline.dec_rad
+ except:
+ dprintf(0, "%s:%d: %s, skipping\n", filename, linenum, str(sys.exc_info()[1]))
+ dprintf(2, "imported %d sources from file %s\n", len(srclist), filename)
+ # create model
+ model = ModelClasses.SkyModel(*srclist)
+ if ref_freq is not None:
+ model.setRefFreq(ref_freq)
+ # setup model center
+ if center_on_brightest and radec0:
+ dprintf(2, "setting model centre to brightest source %s (%g Jy) at %f,%f\n", brightest_name, maxbright,
+ *radec0)
+ model.setFieldCenter(*radec0)
+ elif patches:
+ name, ra, dec = patches[0]
+ dprintf(2, "setting model centre to first patch %s at %f,%f\n", name, ra, dec)
+ model.setFieldCenter(ra, dec)
+ # map patches to model tags
+ model.setAttribute("BBS_Patches", patches)
+ model.setAttribute("BBS_Format", format_str)
+ # setup radial distances
+ projection = Coordinates.Projection.SinWCS(*model.fieldCenter())
+ for src in model.sources:
+ l, m = projection.lm(src.pos.ra, src.pos.dec)
+ src.setAttribute('r', math.sqrt(l * l + m * m))
+ return model
- maxbright = 0;
- patches = [];
- ref_freq = freq0;
- # now process file line-by-line
- linenum = 1;
- for line in ff:
- linenum += 1;
- try:
- # parse one line
- dprint(4,"read line:",line);
- catline = parser.parse(line,linenum);
- if not catline:
- continue;
- dprint(5,"line %d: "%linenum,catline.__dict__);
- # is it a patch record?
- patchname = getattr(catline,'Patch','');
- if not catline.Name:
- dprintf(2,"%s:%d: patch %s\n",filename,linenum,patchname);
- patches.append((patchname,catline.ra_rad,catline.dec_rad));
- continue;
- # form up name
- name = "%s:%s"%(patchname,catline.Name) if patchname else catline.Name;
- # check source type
- stype = catline.Type.upper();
- if stype not in ("POINT","GAUSSIAN"):
- raise ValueError,"unsupported source type %s"%stype;
- # see if we have freq0
- if freq0:
- f0 = freq0;
- elif hasattr(catline,'ReferenceFrequency'):
- f0 = float(catline.ReferenceFrequency or '0');
- else:
- f0 = None;
- # set model refrence frequency
- if f0 is not None and ref_freq is None:
- ref_freq = f0;
- # see if we have Q/U/V
- i,q,u,v = [ float(getattr(catline,stokes,'0') or '0') for stokes in "IQUV" ];
- # see if we have RM as well. Create flux object (unpolarized, polarized, polarized w/RM)
- if f0 is not None and hasattr(catline,'RotationMeasure'):
- flux = ModelClasses.PolarizationWithRM(i,q,u,v,float(catline.RotationMeasure or '0'),f0);
- else:
- flux = ModelClasses.Polarization(i,q,u,v);
- # see if we have a spectral index
- if f0 is not None and hasattr(catline,'SpectralIndex:0'):
- spectrum = ModelClasses.SpectralIndex(float(getattr(catline,'SpectralIndex:0') or '0'),f0);
- else:
- spectrum = None;
- # see if we have extent parameters
- if stype == "GAUSSIAN":
- ex = float(getattr(catline,"MajorAxis","0") or "0");
- ey = float(getattr(catline,"MinorAxis","0") or "0");
- pa = float(getattr(catline,"Orientation","0") or "0");
- shape = ModelClasses.Gaussian(ex,ey,pa);
- else:
- shape = None;
- # create tags
- tags = {};
- for field in "Patch","Category":
- if hasattr(catline,field):
- tags['BBS_%s'%field] = getattr(catline,field);
- # OK, now form up the source object
- # position
- pos = ModelClasses.Position(catline.ra_rad,catline.dec_rad);
- # now create a source object
- src = SkyModel.Source(name,pos,flux,shape=shape,spectrum=spectrum,**tags);
- srclist.append(src);
- # check if it's the brightest
- brightness = src.brightness();
- if brightness > maxbright:
- maxbright = brightness;
- brightest_name = src.name;
- radec0 = catline.ra_rad,catline.dec_rad;
- except:
- dprintf(0,"%s:%d: %s, skipping\n",filename,linenum,str(sys.exc_info()[1]));
- dprintf(2,"imported %d sources from file %s\n",len(srclist),filename);
- # create model
- model = ModelClasses.SkyModel(*srclist);
- if ref_freq is not None:
- model.setRefFreq(ref_freq);
- # setup model center
- if center_on_brightest and radec0:
- dprintf(2,"setting model centre to brightest source %s (%g Jy) at %f,%f\n",brightest_name,maxbright,*radec0);
- model.setFieldCenter(*radec0);
- elif patches:
- name,ra,dec = patches[0];
- dprintf(2,"setting model centre to first patch %s at %f,%f\n",name,ra,dec);
- model.setFieldCenter(ra,dec);
- # map patches to model tags
- model.setAttribute("BBS_Patches",patches);
- model.setAttribute("BBS_Format",format_str);
- # setup radial distances
- projection = Coordinates.Projection.SinWCS(*model.fieldCenter());
- for src in model.sources:
- l,m = projection.lm(src.pos.ra,src.pos.dec);
- src.setAttribute('r',math.sqrt(l*l+m*m));
- return model;
+def save(model, filename, sources=None, format=None, **kw):
+ """Exports model to a BBS catalog file"""
+ if sources is None:
+ sources = model.sources
+ dprintf(2, "writing %d model sources to BBS file %s\n", len(sources), filename)
+ # create catalog parser based on either specified format, or the model format, or the default format
+ format = format or getattr(model, 'BBS_Format',
+ "Name, Type, Patch, Ra, Dec, I, Q, U, V, ReferenceFrequency, SpectralIndexDegree='0', SpectralIndex:0='0.0', MajorAxis, MinorAxis, Orientation")
+ dprint(2, "format string is", format)
+ parser = CatalogParser(format)
+ # check for mandatory fields
+ for field in "Name", "Type":
+ if not parser.defines(field):
+ raise ValueError, "Output format lacks mandatory field '%s'" % field
+ # open file
+ ff = open(filename, mode="wt")
+ ff.write("# (%s) = format\n# The above line defines the field order and is required.\n\n" % format)
+ # write patches
+ for name, ra, dec in getattr(model, "BBS_Patches", []):
+ catline = parser.newline()
+ catline.Patch = name
+ catline.setPosition(ra, dec)
+ ff.write(catline.makeStr() + "\n")
+ ff.write("\n")
+ # write sources
+ nsrc = 0
+ for src in sources:
+ catline = parser.newline()
+ # type
+ if src.shape is None:
+ catline.Type = "POINT"
+ elif isinstance(src.shape, ModelClasses.Gaussian):
+ catline.Type = "GAUSSIAN"
+ else:
+ dprint(3, "skipping source '%s': non-supported type '%s'" % (src.name, src.shape.typecode))
+ continue
+ # name and patch
+ name = src.name
+ patch = getattr(src, 'BBS_Patch', '')
+ if patch and name.startswith(patch + ':'):
+ name = name[(len(patch) + 1):]
+ catline.Name = name
+ setattr(catline, 'Patch', patch)
+ # position
+ catline.setPosition(src.pos.ra, src.pos.dec)
+ # fluxes
+ for stokes in "IQUV":
+ setattr(catline, stokes, str(getattr(src.flux, stokes, 0.)))
+ # reference freq
+ freq0 = (src.spectrum and getattr(src.spectrum, 'freq0', None)) or getattr(src.flux, 'freq0', None)
+ if freq0 is not None:
+ setattr(catline, 'ReferenceFrequency', str(freq0))
+ # RM, spi
+ if isinstance(src.spectrum, ModelClasses.SpectralIndex):
+ setattr(catline, 'SpectralIndexDegree', '0')
+ setattr(catline, 'SpectralIndex:0', str(src.spectrum.spi))
+ if isinstance(src.flux, ModelClasses.PolarizationWithRM):
+ setattr(catline, 'RotationMeasure', str(src.flux.rm))
+ # shape
+ if isinstance(src.shape, ModelClasses.Gaussian):
+ setattr(catline, 'MajorAxis', src.shape.ex)
+ setattr(catline, 'MinorAxis', src.shape.ey)
+ setattr(catline, 'Orientation', src.shape.pa)
+ # write line
+ ff.write(catline.makeStr() + "\n")
+ nsrc += 1
-def save (model,filename,sources=None,format=None,**kw):
- """Exports model to a BBS catalog file""";
- if sources is None:
- sources = model.sources;
- dprintf(2,"writing %d model sources to BBS file %s\n",len(sources),filename);
- # create catalog parser based on either specified format, or the model format, or the default format
- format = format or getattr(model,'BBS_Format',
- "Name, Type, Patch, Ra, Dec, I, Q, U, V, ReferenceFrequency, SpectralIndexDegree='0', SpectralIndex:0='0.0', MajorAxis, MinorAxis, Orientation");
- dprint(2,"format string is",format);
- parser = CatalogParser(format);
- # check for mandatory fields
- for field in "Name","Type":
- if not parser.defines(field):
- raise ValueError,"Output format lacks mandatory field '%s'"%field;
- # open file
- ff = open(filename,mode="wt");
- ff.write("# (%s) = format\n# The above line defines the field order and is required.\n\n"%format);
- # write patches
- for name,ra,dec in getattr(model,"BBS_Patches",[]):
- catline = parser.newline();
- catline.Patch = name;
- catline.setPosition(ra,dec);
- ff.write(catline.makeStr()+"\n");
- ff.write("\n");
- # write sources
- nsrc = 0;
- for src in sources:
- catline = parser.newline();
- # type
- if src.shape is None:
- catline.Type = "POINT";
- elif isinstance(src.shape,ModelClasses.Gaussian):
- catline.Type = "GAUSSIAN";
- else:
- dprint(3,"skipping source '%s': non-supported type '%s'"%(src.name,src.shape.typecode));
- continue;
- # name and patch
- name = src.name;
- patch = getattr(src,'BBS_Patch','');
- if patch and name.startswith(patch+':'):
- name = name[(len(patch)+1):]
- catline.Name = name;
- setattr(catline,'Patch',patch);
- # position
- catline.setPosition(src.pos.ra,src.pos.dec);
- # fluxes
- for stokes in "IQUV":
- setattr(catline,stokes,str(getattr(src.flux,stokes,0.)));
- # reference freq
- freq0 = (src.spectrum and getattr(src.spectrum,'freq0',None)) or getattr(src.flux,'freq0',None);
- if freq0 is not None:
- setattr(catline,'ReferenceFrequency',str(freq0));
- # RM, spi
- if isinstance(src.spectrum,ModelClasses.SpectralIndex):
- setattr(catline,'SpectralIndexDegree','0');
- setattr(catline,'SpectralIndex:0',str(src.spectrum.spi));
- if isinstance(src.flux,ModelClasses.PolarizationWithRM):
- setattr(catline,'RotationMeasure',str(src.flux.rm));
- # shape
- if isinstance(src.shape,ModelClasses.Gaussian):
- setattr(catline,'MajorAxis',src.shape.ex);
- setattr(catline,'MinorAxis',src.shape.ey);
- setattr(catline,'Orientation',src.shape.pa);
- # write line
- ff.write(catline.makeStr()+"\n");
- nsrc += 1;
-
- ff.close();
- dprintf(1,"wrote %d sources to file %s\n",nsrc,filename);
+ ff.close()
+ dprintf(1, "wrote %d sources to file %s\n", nsrc, filename)
-Tigger.Models.Formats.registerFormat("BBS",load,"BBS source catalog",(".cat",".catalog"),export_func=save);
+Tigger.Models.Formats.registerFormat("BBS", load, "BBS source catalog", (".cat", ".catalog"), export_func=save)
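
For context, a small sketch (not part of the patch) of the header line the BBS loader above expects on line 1 of a catalog; the field list and sample values are invented for illustration, and only the header regular expression matches the one used in load().

    import re

    # Sketch only: the first catalog line declares the field order.
    line0 = "# (Name, Type, Ra, Dec, I, ReferenceFrequency='150e6') = format"
    match = re.match(r"#\s*\((.+)\)\s*=\s*format", line0)
    if match:
        print(match.group(1))  # Name, Type, Ra, Dec, I, ReferenceFrequency='150e6'
    # Subsequent lines list values in the same order and with the same separators,
    # e.g. "src1, POINT, 05:42:36.1, +49.51.07.2, 2.5, 150e6"; a line whose Name
    # field is empty is treated as a patch record by the loader above when a
    # Patch field is defined in the format.
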
diff --git a/Tigger/Models/Formats/ModelHTML.py b/Tigger/Models/Formats/ModelHTML.py
index 764c23c..ac95104 100644
--- a/Tigger/Models/Formats/ModelHTML.py
+++ b/Tigger/Models/Formats/ModelHTML.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -25,168 +25,172 @@
#
import time
-import os.path
-import sys
import traceback
from HTMLParser import HTMLParser
import Kittens.utils
-_verbosity = Kittens.utils.verbosity(name="lsmhtml");
-dprint = _verbosity.dprint;
-dprintf = _verbosity.dprintf;
+
+_verbosity = Kittens.utils.verbosity(name="lsmhtml")
+dprint = _verbosity.dprint
+dprintf = _verbosity.dprintf
from Tigger.Models import ModelClasses
-from Tigger.Models import SkyModel
-
-DefaultExtension = "lsm.html";
-
-def save (model,filename,sources=None,**kw):
- if sources is None:
- sources = model.sources;
- fobj = file(filename,'w');
- fobj.write("""\n""");
- if model.name is not None:
- fobj.write(model.renderAttrMarkup('name',model.name,tags='TITLE',verbose="Sky model: "));
- fobj.write("\n");
- # write list of sources
- fobj.write("""Source list
\n\n""");
- for src in sources:
- fobj.write(src.renderMarkup(tags=["TR\n","TD"]));
- fobj.write("\n");
- fobj.write("""
\n""");
- # plot styles
- if model.plotstyles is not None:
- fobj.write("""Plot styles
\n\n""");
- fobj.write(model.renderAttrMarkup('plotstyles',model.plotstyles,tags=['A','TR\n','TD'],verbose=""));
- fobj.write("""
\n""");
- # other attributes
- fobj.write("\n");
- fobj.write("""Other properties
\n""");
- if model.pbexp is not None:
- fobj.write("");
- fobj.write(model.renderAttrMarkup('pbexp',model.pbexp,tags='A',verbose="Primary beam expression: "));
- fobj.write("
\n");
- if model.freq0 is not None:
- fobj.write("");
- fobj.write(model.renderAttrMarkup('freq0',model.freq0,tags='A',verbose="Reference frequency, Hz: "));
- fobj.write("
\n");
- if model.ra0 is not None or model.dec0 is not None:
- fobj.write("");
- fobj.write(model.renderAttrMarkup('ra0',model.ra0,tags='A',verbose="Field centre ra: "));
- fobj.write(model.renderAttrMarkup('dec0',model.dec0,tags='A',verbose="dec: "));
- fobj.write("
\n");
- for attr,value in model.getExtraAttributes():
- if attr not in ("pbexp","freq0","ra0","dec0"):
- fobj.write("");
- fobj.write(model.renderAttrMarkup(attr,value,tags='A'));
- fobj.write("
\n");
- fobj.write("""\n""");
-
-def load (filename,**kw):
- parser = ModelIndexParser();
- parser.reset();
- for line in file(filename):
- parser.feed(line);
- parser.close();
- if not parser.toplevel_objects:
- raise RuntimeError,"failed to load sky model from file %s"%filename;
- return parser.toplevel_objects[0];
-
-class ModelIndexParser (HTMLParser):
- def reset (self):
- HTMLParser.reset(self);
- self.objstack = [];
- self.tagstack = [];
- self.toplevel_objects = [];
-
- def end (self):
- dprintf(4,"end");
-
- def handle_starttag (self,tag,attrs):
- dprint(4,"start tag",tag,attrs);
- attrs = dict(attrs);
- # append tag to tag stack. Second element in tuple indicates whether
- # tag is associated with the start of an object definition
- self.tagstack.append([tag,None]);
- # see if attributes describe an LSM object
- # 'type' is an object class
- mdltype = attrs.get('mdltype');
- if not mdltype:
- return;
- # 'attr' is an attribute name. If this is set, then the object is an attribute
- # of the parent-level class
- mdlattr = attrs.get('mdlattr');
- # 'value' is a value. If this is set, then the object can be created from a string
- mdlval = attrs.get('mdlval');
- dprintf(3,"model item type %s, attribute %s, inline value %s\n",mdltype,mdlattr,mdlval);
- if mdlattr and not self.objstack:
- dprintf(0,"WARNING: attribute %s at top level, ignoring\n",mdlattr);
- return;
- # Now look up the class in our globals, or in ModelClasses
- typeobj = ModelClasses.AtomicTypes.get(mdltype) or ModelClasses.__dict__.get(mdltype);
- if not callable(typeobj):
- dprintf(0,"WARNING: unknown object type %s, ignoring\n",mdltype);
- return;
- # see if object value is inlined
- if mdlval is not None:
- try:
- obj = typeobj(eval(mdlval));
- except:
- traceback.print_exc();
- dprintf(0,"WARNING: failed to create object of type %s from string value '%s', ignoring\n",mdltype,mdlval);
- return;
- self.add_object(mdlattr,obj);
- # else add object to stack and start accumulating attributes
- else:
- # change entry on tagstack to indicate that this tag started an object
- self.tagstack[-1][1] = len(self.objstack);
- # append object entry to stack -- we'll create the object when a corresponding end-tag
- # is encountered.
- self.objstack.append([mdlattr,typeobj,[],{}]);
-
- def handle_endtag (self,endtag):
- dprint(4,"end tag",endtag);
- # close all tags from top of stack, until we hit this one's start tag
- while self.tagstack:
- tag,nobj = self.tagstack.pop(-1);
- dprint(4,"closing tag",tag);
- # if tag corresponds to an object, create object
- if nobj is not None:
- self.close_stack_object();
- if tag == endtag:
- break;
-
- def add_object (self,attr,obj):
- """Adds object to model."""
- # if no object stack, then object is a top-level container
- if not self.objstack:
- if attr:
- dprintf(0,"WARNING: attribute %s at top level, ignoring\n",attr);
- return;
- self.toplevel_objects.append(obj);
- # else add object as attribute or argument of top container in the stack
- else:
- if attr:
- self.objstack[-1][3][attr] = obj;
- else:
- self.objstack[-1][2].append(obj);
-
- def close_stack_object (self):
- """This function is called when an object from the top of the stack needs to be created.
- Stops accumulating attributes and calls the object constructor."""
- mdlattr,typeobj,args,kws = self.objstack.pop(-1);
- # create object
- try:
- if typeobj in (list,tuple):
- obj = typeobj(args);
- else:
- obj = typeobj(*args,**kws);
- except:
- traceback.print_exc();
- dprintf(0,"WARNING: failed to create object of type %s for attribute %s, ignoring\n",typeobj,mdlattr);
- return;
- # add the object to model
- self.add_object(mdlattr,obj);
+
+DefaultExtension = "lsm.html"
+
+
+def save(model, filename, sources=None, **kw):
+ if sources is None:
+ sources = model.sources
+ fobj = file(filename, 'w')
+ fobj.write("""\n""")
+ if model.name is not None:
+ fobj.write(model.renderAttrMarkup('name', model.name, tags='TITLE', verbose="Sky model: "))
+ fobj.write("\n")
+ # write list of sources
+ fobj.write("""Source list
\n\n""")
+ for src in sources:
+ fobj.write(src.renderMarkup(tags=["TR\n", "TD"]))
+ fobj.write("\n")
+ fobj.write("""
\n""")
+ # plot styles
+ if model.plotstyles is not None:
+ fobj.write("""Plot styles
\n\n""")
+ fobj.write(model.renderAttrMarkup('plotstyles', model.plotstyles, tags=['A', 'TR\n', 'TD'], verbose=""))
+ fobj.write("""
\n""")
+ # other attributes
+ fobj.write("\n")
+ fobj.write("""Other properties
\n""")
+ if model.pbexp is not None:
+ fobj.write("")
+ fobj.write(model.renderAttrMarkup('pbexp', model.pbexp, tags='A', verbose="Primary beam expression: "))
+ fobj.write("
\n")
+ if model.freq0 is not None:
+ fobj.write("")
+ fobj.write(model.renderAttrMarkup('freq0', model.freq0, tags='A', verbose="Reference frequency, Hz: "))
+ fobj.write("
\n")
+ if model.ra0 is not None or model.dec0 is not None:
+ fobj.write("")
+ fobj.write(model.renderAttrMarkup('ra0', model.ra0, tags='A', verbose="Field centre ra: "))
+ fobj.write(model.renderAttrMarkup('dec0', model.dec0, tags='A', verbose="dec: "))
+ fobj.write("
\n")
+ for attr, value in model.getExtraAttributes():
+ if attr not in ("pbexp", "freq0", "ra0", "dec0"):
+ fobj.write("")
+ fobj.write(model.renderAttrMarkup(attr, value, tags='A'))
+ fobj.write("
\n")
+ fobj.write("""\n""")
+
+
+def load(filename, **kw):
+ parser = ModelIndexParser()
+ parser.reset()
+ for line in file(filename):
+ parser.feed(line)
+ parser.close()
+ if not parser.toplevel_objects:
+ raise RuntimeError, "failed to load sky model from file %s" % filename
+ return parser.toplevel_objects[0]
+
+
+class ModelIndexParser(HTMLParser):
+ def reset(self):
+ HTMLParser.reset(self)
+ self.objstack = []
+ self.tagstack = []
+ self.toplevel_objects = []
+
+ def end(self):
+ dprintf(4, "end")
+
+ def handle_starttag(self, tag, attrs):
+ dprint(4, "start tag", tag, attrs)
+ attrs = dict(attrs)
+ # append tag to tag stack. Second element in tuple indicates whether
+ # tag is associated with the start of an object definition
+ self.tagstack.append([tag, None])
+ # see if attributes describe an LSM object
+ # 'type' is an object class
+ mdltype = attrs.get('mdltype')
+ if not mdltype:
+ return
+ # 'attr' is an attribute name. If this is set, then the object is an attribute
+ # of the parent-level class
+ mdlattr = attrs.get('mdlattr')
+ # 'value' is a value. If this is set, then the object can be created from a string
+ mdlval = attrs.get('mdlval')
+ dprintf(3, "model item type %s, attribute %s, inline value %s\n", mdltype, mdlattr, mdlval)
+ if mdlattr and not self.objstack:
+ dprintf(0, "WARNING: attribute %s at top level, ignoring\n", mdlattr)
+ return
+ # Now look up the class in our globals, or in ModelClasses
+ typeobj = ModelClasses.AtomicTypes.get(mdltype) or ModelClasses.__dict__.get(mdltype)
+ if not callable(typeobj):
+ dprintf(0, "WARNING: unknown object type %s, ignoring\n", mdltype)
+ return
+ # see if object value is inlined
+ if mdlval is not None:
+ try:
+ obj = typeobj(eval(mdlval))
+ except:
+ traceback.print_exc()
+ dprintf(0, "WARNING: failed to create object of type %s from string value '%s', ignoring\n", mdltype,
+ mdlval)
+ return
+ self.add_object(mdlattr, obj)
+ # else add object to stack and start accumulating attributes
+ else:
+ # change entry on tagstack to indicate that this tag started an object
+ self.tagstack[-1][1] = len(self.objstack)
+ # append object entry to stack -- we'll create the object when a corresponding end-tag
+ # is encountered.
+ self.objstack.append([mdlattr, typeobj, [], {}])
+
+ def handle_endtag(self, endtag):
+ dprint(4, "end tag", endtag)
+ # close all tags from top of stack, until we hit this one's start tag
+ while self.tagstack:
+ tag, nobj = self.tagstack.pop(-1)
+ dprint(4, "closing tag", tag)
+ # if tag corresponds to an object, create object
+ if nobj is not None:
+ self.close_stack_object()
+ if tag == endtag:
+ break
+
+ def add_object(self, attr, obj):
+ """Adds object to model."""
+ # if no object stack, then object is a top-level container
+ if not self.objstack:
+ if attr:
+ dprintf(0, "WARNING: attribute %s at top level, ignoring\n", attr)
+ return
+ self.toplevel_objects.append(obj)
+ # else add object as attribute or argument of top container in the stack
+ else:
+ if attr:
+ self.objstack[-1][3][attr] = obj
+ else:
+ self.objstack[-1][2].append(obj)
+
+ def close_stack_object(self):
+ """This function is called when an object from the top of the stack needs to be created.
+ Stops accumulating attributes and calls the object constructor."""
+ mdlattr, typeobj, args, kws = self.objstack.pop(-1)
+ # create object
+ try:
+ if typeobj in (list, tuple):
+ obj = typeobj(args)
+ else:
+ obj = typeobj(*args, **kws)
+ except:
+ traceback.print_exc()
+ dprintf(0, "WARNING: failed to create object of type %s for attribute %s, ignoring\n", typeobj, mdlattr)
+ return
+ # add the object to model
+ self.add_object(mdlattr, obj)
+
import Tigger.Models.Formats
-Tigger.Models.Formats.registerFormat("Tigger",load,"Tigger sky model",(".lsm.html",),export_func=save);
+
+Tigger.Models.Formats.registerFormat("Tigger", load, "Tigger sky model", (".lsm.html",), export_func=save)
diff --git a/Tigger/Models/Formats/NEWSTAR.py b/Tigger/Models/Formats/NEWSTAR.py
index 3369a66..548c8e6 100644
--- a/Tigger/Models/Formats/NEWSTAR.py
+++ b/Tigger/Models/Formats/NEWSTAR.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,319 +26,323 @@
#
-import sys
-import traceback
import math
import struct
+import sys
import time
-import os.path
-
-import numpy
+import traceback
-import Kittens.utils
+import numpy
+import os.path
import Tigger.Models.Formats
from Tigger.Models import ModelClasses
from Tigger.Models import SkyModel
-from Tigger import Coordinates
-from Tigger.Models.Formats import dprint,dprintf
-
-def lm_ncp_to_radec (ra0,dec0,l,m):
- """Converts coordinates in l,m (NCP) relative to ra0,dec0 into ra,dec.""";
- sind0=math.sin(dec0)
- cosd0=math.cos(dec0)
- dl=l
- dm=m
- d0=dm*dm*sind0*sind0+dl*dl-2*dm*cosd0*sind0
- sind=math.sqrt(abs(sind0*sind0-d0))
- cosd=math.sqrt(abs(cosd0*cosd0+d0))
- if sind0>0:
- sind=abs(sind)
- else:
- sind=-abs(sind)
- dec=math.atan2(sind,cosd)
- if l != 0:
- ra=math.atan2(-dl,(cosd0-dm*sind0))+ra0
- else:
- ra=math.atan2((1e-10),(cosd0-dm*sind0))+ra0
- return ra,dec
-
-def radec_to_lm_ncp (ra0,dec0,ra,dec):
- """Converts coordinates in l,m (NCP) relative to ra0,dec0 into ra,dec.""";
- l=-math.sin(ra-ra0)*math.cos(dec)
- sind0=math.sin(dec0)
- if sind0 != 0:
- m=-(math.cos(ra-ra0)*math.cos(dec)-math.cos(dec0))/math.sin(dec0)
- else:
- m=0
- return (l,m)
-
-
-def parseGFH (gfh):
- """Parses the GFH (general file header?) structure at the beginning of the file""";
- ## type
- ftype = gfh[0:4].tostring()
- ## length & version
- fhlen,fver = struct.unpack('ii',gfh[4:12])
- ### creation date
- crdate = gfh[12:23].tostring()
- ### creation time
- crtime = gfh[23:28].tostring()
- ### revision date
- rrdate = gfh[28:39].tostring()
- ### revision time
- rrtime = gfh[39:44].tostring()
- ### revision count
- rcount = struct.unpack('i',gfh[44:48])
- rcount = rcount[0]
- ### node name
- nname = gfh[48:128].tostring()
- ### types
- dattp = struct.unpack('B',gfh[128:129])[0];
- link1,link2 = struct.unpack('ii',gfh[152:160]);
- ### the remaining info is not needed
- dprint(1,"read header type=%s, length=%d, version=%d, created=%s@%s, updated=%s@%s x %d, node name=%s, dattp=%d, link=%d,%d"%
- (ftype,fhlen,fver,crdate,crtime,rrdate,rrtime,rcount,nname,dattp,link1,link2));
- return (ftype,fhlen,fver,crdate,crtime,rrdate,rrtime,rcount,nname);
-
-def parseMDH (mdh):
- """Parses the MDH (model file header?) structure""";
- maxlin,modptr,nsources,mtype = struct.unpack('iiii',mdh[12:28]);
- mepoch = struct.unpack('f',mdh[28:32])[0];
- ra0,dec0,freq0 = struct.unpack('ddd',mdh[32:56]);
- ### Max. # of lines in model or disk version
- ### pointer to model ???
- ### no of sources in model
- ### model type(0: no ra,dec, 1=app, 2=epoch)
- ### Epoch (e.g. 1950) if TYP=2 (float) : 4 bytes
- ### Model centre RA (circles) : double
- ra0 *= math.pi*2;
- dec0 *= math.pi*2;
- ### Model centre FRQ (MHz)
- freq0 *= 1e6
- ### the remaining is not needed
- dprint(1,"read model header maxlines=%d, pointer=%d, sources=%d, type=%d, epoch=%f RA=%f, DEC=%f (rad) Freq=%f Hz"%
- (maxlin,modptr,nsources,mtype,mepoch,ra0,dec0,freq0));
- return (maxlin,modptr,nsources,mtype,mepoch,ra0,dec0,freq0);
-
-def load (filename,import_src=True,import_cc=True,min_extent=0,**kw):
- """Imports a NEWSTAR MDL file.
- min_extent is minimal source extent (in radians), above which a source will be treated as a Gaussian rather than a point component.
- import_src=False causes source components to be omitted
- import_cc=False causes clean components to be omitted
- """;
- srclist = [];
- dprint(1,"importing NEWSTAR file",filename);
- # build the LSM from a NewStar .MDL model file
- # if only_cleancomp=True, only clean components are used to build the LSM
- # if no_cleancomp=True, no clean components are used to build the LSM
- ff = open(filename,mode="rb");
-
- ### read GFH and MDH headers -- 512 bytes
- try:
- gfh = numpy.fromfile(ff,dtype=numpy.uint8,count=512);
- mdh = numpy.fromfile(ff,dtype=numpy.uint8,count=64);
- # parse headers
- ftype,fhlen,fver,crdate,crtime,rrdate,rrtime,rcount,nname = parseGFH(gfh);
- if ftype != ".MDL":
- raise TypeError;
- maxlin,modptr,nsources,mtype,mepoch,ra0,dec0,freq0 = parseMDH(mdh);
-
- beam_const = 65*1e-9*freq0;
-
- ## temp dict to hold unique nodenames
- unamedict={}
- ### Models -- 56 bytes
- for ii in xrange(0,nsources):
- mdl = numpy.fromfile(ff,dtype=numpy.uint8,count=56)
-
- ### source parameters
- sI,ll,mm,id,sQ,sU,sV,eX,eY,eP,SI,RM = struct.unpack('fffiffffffff',mdl[0:48])
- ### type bits
- bit1,bit2 = struct.unpack('BB',mdl[52:54]);
-
- # convert fluxes
- sI *= 0.005 # convert from WU to Jy (1WU=5mJy)
- sQ *= sI;
- sU *= sI;
- sV *= sI;
-
- # Interpret bitflags 1: bit 0= extended; bit 1= Q|U|V <>0 and no longer used according to Wim
- fl_ext = bit1&1;
- # Interpret bitflags 2: bit 0= clean component; bit 3= beamed
- fl_cc = bit2&1;
- fl_beamed = bit2&8;
-
- ### extended source params: in arcsec, so multiply by ???
- if fl_ext:
- ## the procedure is NMOEXT in nscan/nmoext.for
- if eP == 0 and eX == eY:
- r0 = 0
- else:
- r0 = .5*math.atan2(-eP,eY-eX)
- r1 = math.sqrt(eP*eP+(eX-eY)*(eX-eY))
- r2 = eX+eY
- eX = 2*math.sqrt(abs(0.5*(r2+r1)))
- eY = 2*math.sqrt(abs(0.5*(r2-r1)))
- eP = r0
-
- # NEWSTAR MDL lists might have same source twice if they are
- # clean components, so make a unique name for them
- bname='N'+str(id);
- if unamedict.has_key(bname):
- uniqname = bname+'_'+str(unamedict[bname])
- unamedict[bname] += 1
- else:
- uniqname = bname
- unamedict[bname] = 1
- # compose source information
- pos = ModelClasses.Position(*lm_ncp_to_radec(ra0,dec0,ll,mm));
- flux = ModelClasses.PolarizationWithRM(sI,sQ,sU,sV,RM,freq0);
- spectrum = ModelClasses.SpectralIndex(SI,freq0);
- tags = {};
- # work out beam gain and apparent flux
- tags['_lm_ncp'] = (ll,mm);
- tags['_newstar_r'] = tags['r'] = r = math.sqrt(ll*ll+mm*mm);
- tags['newstar_beamgain'] = bg = max(math.cos(beam_const*r)**6,.01);
- tags['newstar_id'] = id;
- if fl_beamed:
- tags['Iapp'] = sI*bg;
- tags['newstar_beamed'] = True;
- tags['flux_intrinsic'] = True;
- else:
- tags['flux_apparent'] = True;
- # make some tags based on model flags
- if fl_cc:
- tags['newstar_cc'] = True;
- # make shape if extended
- if fl_ext and max(eX,eY) >= min_extent:
- shape = ModelClasses.Gaussian(eX,eY,eP);
- else:
- shape = None;
- # compute apparent flux
- src = SkyModel.Source(uniqname,pos,flux,shape=shape,spectrum=spectrum,**tags);
- srclist.append(src);
- except:
- traceback.print_exc();
- raise TypeError("%s does not appear to be a valid NEWSTAR MDL file"%filename);
-
- dprintf(2,"imported %d sources from file %s\n",len(srclist),filename);
- return ModelClasses.SkyModel(ra0=ra0,dec0=dec0,freq0=freq0,pbexp='max(cos(65*1e-9*fq*r)**6,.01)',*srclist);
-
-
-def save (model,filename,freq0=None,sources=None,**kw):
- """Saves model to a NEWSTAR MDL file.
- The MDL file must exist, since the existing header is reused.
- 'sources' is a list of sources to write, if None, then model.sources is used.
- """
- if sources is None:
- sources = model.sources;
- dprintf(2,"writing %s model sources to NEWSTAR file\n",len(sources),filename);
-
- ra0,dec0 = model.fieldCenter();
- freq0 = freq0 or model.refFreq();
- # if freq0 is not specified, scan sources
- if freq0 is None:
- for src in sources:
- freq0 = (src.spectrum and getattr(src.spectrum,'freq0',None)) or getattr(src.flux,'freq0',None);
- if freq0:
- break;
+from Tigger.Models.Formats import dprint, dprintf
+
+
+def lm_ncp_to_radec(ra0, dec0, l, m):
+ """Converts coordinates in l,m (NCP) relative to ra0,dec0 into ra,dec."""
+ sind0 = math.sin(dec0)
+ cosd0 = math.cos(dec0)
+ dl = l
+ dm = m
+ d0 = dm * dm * sind0 * sind0 + dl * dl - 2 * dm * cosd0 * sind0
+ sind = math.sqrt(abs(sind0 * sind0 - d0))
+ cosd = math.sqrt(abs(cosd0 * cosd0 + d0))
+ if sind0 > 0:
+ sind = abs(sind)
+ else:
+ sind = -abs(sind)
+ dec = math.atan2(sind, cosd)
+ if l != 0:
+ ra = math.atan2(-dl, (cosd0 - dm * sind0)) + ra0
else:
- raise ValueError("unable to determine NEWSTAR model reference frequency, please specify one explicitly.");
-
- ff = open(filename,mode="wb");
-
- ### create GFH header
- gfh = numpy.zeros(512,dtype=numpy.uint8);
- datestr = time.strftime("%d-%m-%Y");
- timestr = time.strftime("%H:%M");
- struct.pack_into("4sii11s5s11s5si80sB",gfh,0,".MDL",512,1,
- datestr,timestr,datestr,timestr,0,
- os.path.splitext(os.path.basename(filename))[0],6); # 6=datatype
- # link1/link2 gives the header size actually
- struct.pack_into("ii",gfh,152,512,512);
- gfh.tofile(ff);
-
- # create MDH header
- mdh = numpy.zeros(64,dtype=numpy.uint8);
- struct.pack_into('iiii',mdh,12,1,576,0,2); # maxlin,pointer,num_sources,mtype
- struct.pack_into('f',mdh,28,getattr(model,'epoch',2000));
- struct.pack_into('ddd',mdh,32,ra0/(2*math.pi),dec0/(2*math.pi),freq0*1e-6);
- mdh.tofile(ff);
-
- # get the max ID, if specified
- max_id = max([ getattr(src,'newstar_id',0) for src in sources ]);
- # now loop over model sources
- # count how many are written out -- only point sources and gaussians are actually written out, the rest are skipped
- nsrc = 0;
- for src in sources:
- # create empty newstar source structure
- mdl = numpy.zeros(56,dtype=numpy.uint8);
-
- if src.shape and not isinstance(src.shape,ModelClasses.Gaussian):
- dprint(3,"skipping source '%s': non-supported type '%s'"%(src.name,src.shape.typecode));
- continue;
-
- stI = src.flux.I;
- # get l,m NCP position -- either from tag, or compute
- lm = getattr(src,'_lm_ncp',None);
- if lm:
- if isinstance(lm,(tuple,list)) and len(lm) == 2:
- l,m = lm;
- else:
- dprint(0,"warning: skipping source '%s' because its _lm_ncp attribute is malformed (tuple of 2 values expected)"%src.name);
- continue;
+ ra = math.atan2((1e-10), (cosd0 - dm * sind0)) + ra0
+ return ra, dec
+
+
+def radec_to_lm_ncp(ra0, dec0, ra, dec):
+ """Converts coordinates in l,m (NCP) relative to ra0,dec0 into ra,dec."""
+ l = -math.sin(ra - ra0) * math.cos(dec)
+ sind0 = math.sin(dec0)
+ if sind0 != 0:
+ m = -(math.cos(ra - ra0) * math.cos(dec) - math.cos(dec0)) / math.sin(dec0)
else:
- l,m = radec_to_lm_ncp(ra0,dec0,src.pos.ra,src.pos.dec);
-
- # update source count
- nsrc += 1;
- # generate source id
- src_id = getattr(src,'newstar_id',None);
- if src_id is None:
- src_id = max_id = max_id+1;
-
- # encode position, flux, identifier -- also, convert flux from Jy to WU to Jy (1WU=5mJy)
- struct.pack_into('fffi',mdl,0,stI/0.005,l,m,src_id);
-
- # encode fractional polarization
- struct.pack_into('fff',mdl,16,*[ getattr(src.flux,stokes,0.0)/stI for stokes in "QUV" ]);
-
- ## encode flag & type bits
- ## Flag: bit 0= extended; bit 1= Q|U|V <>0 and no longer used according to Wim
- ## Type: bit 0= clean component; bit 3= beamed
- beamed = getattr(src,'flux_intrinsic',False) or getattr(src,'newstar_beamed',False);
- struct.pack_into('BB',mdl,52,
- 1 if src.shape else 0,
- (1 if getattr(src,'newstar_cc',False) else 0) | (8 if beamed else 0));
-
- ### extended source parameters
- if src.shape:
- ## the procedure is NMOEXF in nscan/nmoext.for
- R0 = math.cos(src.shape.pa);
- R1 = -math.sin(src.shape.pa);
- R2 = (.5*src.shape.ex)**2;
- R3 = (.5*src.shape.ey)**2;
- ex = R2*R1*R1+R3*R0*R0
- ey = R2*R0*R0+R3*R1*R1
- pa = 2*(R2-R3)*R0*R1
- struct.pack_into('fff',mdl,28,ex,ey,pa);
-
- ### spectral index
- if isinstance(src.spectrum,ModelClasses.SpectralIndex):
- struct.pack_into('f',mdl,40,src.spectrum.spi);
-
- if isinstance(src.flux,ModelClasses.PolarizationWithRM):
- struct.pack_into('f',mdl,44,src.flux.rm);
-
- mdl.tofile(ff);
-
- # update MDH header with the new number of sources
- struct.pack_into('i',mdh,20,nsrc);
- ff.seek(512);
- mdh.tofile(ff);
- ff.close();
- dprintf(1,"wrote %d sources to file %s\n",nsrc,filename);
-
-
-Tigger.Models.Formats.registerFormat("NEWSTAR",load,"NEWSTAR model file",(".mdl",".MDL"),export_func=save);
+ m = 0
+ return (l, m)
+
+
+def parseGFH(gfh):
+ """Parses the GFH (general file header?) structure at the beginning of the file"""
+ ## type
+ ftype = gfh[0:4].tostring()
+ ## length & version
+ fhlen, fver = struct.unpack('ii', gfh[4:12])
+ ### creation date
+ crdate = gfh[12:23].tostring()
+ ### creation time
+ crtime = gfh[23:28].tostring()
+ ### revision date
+ rrdate = gfh[28:39].tostring()
+ ### revision time
+ rrtime = gfh[39:44].tostring()
+ ### revision count
+ rcount = struct.unpack('i', gfh[44:48])
+ rcount = rcount[0]
+ ### node name
+ nname = gfh[48:128].tostring()
+ ### types
+ dattp = struct.unpack('B', gfh[128:129])[0]
+ link1, link2 = struct.unpack('ii', gfh[152:160])
+ ### the remaining info is not needed
+ dprint(1,
+ "read header type=%s, length=%d, version=%d, created=%s@%s, updated=%s@%s x %d, node name=%s, dattp=%d, link=%d,%d" %
+ (ftype, fhlen, fver, crdate, crtime, rrdate, rrtime, rcount, nname, dattp, link1, link2))
+ return (ftype, fhlen, fver, crdate, crtime, rrdate, rrtime, rcount, nname)
+
+
+def parseMDH(mdh):
+ """Parses the MDH (model file header?) structure"""
+ maxlin, modptr, nsources, mtype = struct.unpack('iiii', mdh[12:28])
+ mepoch = struct.unpack('f', mdh[28:32])[0]
+ ra0, dec0, freq0 = struct.unpack('ddd', mdh[32:56])
+ ### Max. # of lines in model or disk version
+ ### pointer to model ???
+ ### no of sources in model
+ ### model type(0: no ra,dec, 1=app, 2=epoch)
+ ### Epoch (e.g. 1950) if TYP=2 (float) : 4 bytes
+ ### Model centre RA (circles) : double
+ ra0 *= math.pi * 2
+ dec0 *= math.pi * 2
+ ### Model centre FRQ (MHz)
+ freq0 *= 1e6
+ ### the remaining is not needed
+ dprint(1,
+ "read model header maxlines=%d, pointer=%d, sources=%d, type=%d, epoch=%f RA=%f, DEC=%f (rad) Freq=%f Hz" %
+ (maxlin, modptr, nsources, mtype, mepoch, ra0, dec0, freq0))
+ return (maxlin, modptr, nsources, mtype, mepoch, ra0, dec0, freq0)
+
+
+def load(filename, import_src=True, import_cc=True, min_extent=0, **kw):
+ """Imports a NEWSTAR MDL file.
+ min_extent is minimal source extent (in radians), above which a source will be treated as a Gaussian rather than a point component.
+ import_src=False causes source components to be omitted
+ import_cc=False causes clean components to be omitted
+ """
+ srclist = []
+ dprint(1, "importing NEWSTAR file", filename)
+ # build the LSM from a NewStar .MDL model file
+ # if only_cleancomp=True, only clean components are used to build the LSM
+ # if no_cleancomp=True, no clean components are used to build the LSM
+ ff = open(filename, mode="rb")
+
+ ### read GFH and MDH headers -- 512 bytes
+ try:
+ gfh = numpy.fromfile(ff, dtype=numpy.uint8, count=512)
+ mdh = numpy.fromfile(ff, dtype=numpy.uint8, count=64)
+ # parse headers
+ ftype, fhlen, fver, crdate, crtime, rrdate, rrtime, rcount, nname = parseGFH(gfh)
+ if ftype != ".MDL":
+ raise TypeError
+ maxlin, modptr, nsources, mtype, mepoch, ra0, dec0, freq0 = parseMDH(mdh)
+
+ beam_const = 65 * 1e-9 * freq0
+
+ ## temp dict to hold unique nodenames
+ unamedict = {}
+ ### Models -- 56 bytes
+ for ii in xrange(0, nsources):
+ mdl = numpy.fromfile(ff, dtype=numpy.uint8, count=56)
+
+ ### source parameters
+ sI, ll, mm, id, sQ, sU, sV, eX, eY, eP, SI, RM = struct.unpack('fffiffffffff', mdl[0:48])
+ ### type bits
+ bit1, bit2 = struct.unpack('BB', mdl[52:54])
+
+ # convert fluxes
+ sI *= 0.005 # convert from WU to Jy (1WU=5mJy)
+ sQ *= sI
+ sU *= sI
+ sV *= sI
+
+ # Interpret bitflags 1: bit 0= extended; bit 1= Q|U|V <>0 and no longer used according to Wim
+ fl_ext = bit1 & 1
+ # Interpret bitflags 2: bit 0= clean component; bit 3= beamed
+ fl_cc = bit2 & 1
+ fl_beamed = bit2 & 8
+
+ ### extended source params: in arcsec, so multiply by ???
+ if fl_ext:
+ ## the procedure is NMOEXT in nscan/nmoext.for
+ if eP == 0 and eX == eY:
+ r0 = 0
+ else:
+ r0 = .5 * math.atan2(-eP, eY - eX)
+ r1 = math.sqrt(eP * eP + (eX - eY) * (eX - eY))
+ r2 = eX + eY
+ eX = 2 * math.sqrt(abs(0.5 * (r2 + r1)))
+ eY = 2 * math.sqrt(abs(0.5 * (r2 - r1)))
+ eP = r0
+
+ # NEWSTAR MDL lists might have same source twice if they are
+ # clean components, so make a unique name for them
+ bname = 'N' + str(id)
+ if unamedict.has_key(bname):
+ uniqname = bname + '_' + str(unamedict[bname])
+ unamedict[bname] += 1
+ else:
+ uniqname = bname
+ unamedict[bname] = 1
+ # compose source information
+ pos = ModelClasses.Position(*lm_ncp_to_radec(ra0, dec0, ll, mm))
+ flux = ModelClasses.PolarizationWithRM(sI, sQ, sU, sV, RM, freq0)
+ spectrum = ModelClasses.SpectralIndex(SI, freq0)
+ tags = {}
+ # work out beam gain and apparent flux
+ tags['_lm_ncp'] = (ll, mm)
+ tags['_newstar_r'] = tags['r'] = r = math.sqrt(ll * ll + mm * mm)
+ tags['newstar_beamgain'] = bg = max(math.cos(beam_const * r) ** 6, .01)
+ tags['newstar_id'] = id
+ if fl_beamed:
+ tags['Iapp'] = sI * bg
+ tags['newstar_beamed'] = True
+ tags['flux_intrinsic'] = True
+ else:
+ tags['flux_apparent'] = True
+ # make some tags based on model flags
+ if fl_cc:
+ tags['newstar_cc'] = True
+ # make shape if extended
+ if fl_ext and max(eX, eY) >= min_extent:
+ shape = ModelClasses.Gaussian(eX, eY, eP)
+ else:
+ shape = None
+ # compute apparent flux
+ src = SkyModel.Source(uniqname, pos, flux, shape=shape, spectrum=spectrum, **tags)
+ srclist.append(src)
+ except:
+ traceback.print_exc()
+ raise TypeError("%s does not appear to be a valid NEWSTAR MDL file" % filename)
+
+ dprintf(2, "imported %d sources from file %s\n", len(srclist), filename)
+ return ModelClasses.SkyModel(ra0=ra0, dec0=dec0, freq0=freq0, pbexp='max(cos(65*1e-9*fq*r)**6,.01)', *srclist)
+
+
+def save(model, filename, freq0=None, sources=None, **kw):
+ """Saves model to a NEWSTAR MDL file.
+ The MDL file must exist, since the existing header is reused.
+ 'sources' is a list of sources to write, if None, then model.sources is used.
+ """
+ if sources is None:
+ sources = model.sources
+ dprintf(2, "writing %s model sources to NEWSTAR file\n", len(sources), filename)
+
+ ra0, dec0 = model.fieldCenter()
+ freq0 = freq0 or model.refFreq()
+ # if freq0 is not specified, scan sources
+ if freq0 is None:
+ for src in sources:
+ freq0 = (src.spectrum and getattr(src.spectrum, 'freq0', None)) or getattr(src.flux, 'freq0', None)
+ if freq0:
+ break
+ else:
+ raise ValueError("unable to determine NEWSTAR model reference frequency, please specify one explicitly.")
+
+ ff = open(filename, mode="wb")
+
+ ### create GFH header
+ gfh = numpy.zeros(512, dtype=numpy.uint8)
+ datestr = time.strftime("%d-%m-%Y")
+ timestr = time.strftime("%H:%M")
+ struct.pack_into("4sii11s5s11s5si80sB", gfh, 0, ".MDL", 512, 1,
+ datestr, timestr, datestr, timestr, 0,
+ os.path.splitext(os.path.basename(filename))[0], 6); # 6=datatype
+ # link1/link2 gives the header size actually
+ struct.pack_into("ii", gfh, 152, 512, 512)
+ gfh.tofile(ff)
+
+ # create MDH header
+ mdh = numpy.zeros(64, dtype=numpy.uint8)
+ struct.pack_into('iiii', mdh, 12, 1, 576, 0, 2); # maxlin,pointer,num_sources,mtype
+ struct.pack_into('f', mdh, 28, getattr(model, 'epoch', 2000))
+ struct.pack_into('ddd', mdh, 32, ra0 / (2 * math.pi), dec0 / (2 * math.pi), freq0 * 1e-6)
+ mdh.tofile(ff)
+
+ # get the max ID, if specified
+ max_id = max([getattr(src, 'newstar_id', 0) for src in sources])
+ # now loop over model sources
+ # count how many are written out -- only point sources and gaussians are actually written out, the rest are skipped
+ nsrc = 0
+ for src in sources:
+ # create empty newstar source structure
+ mdl = numpy.zeros(56, dtype=numpy.uint8)
+
+ if src.shape and not isinstance(src.shape, ModelClasses.Gaussian):
+ dprint(3, "skipping source '%s': non-supported type '%s'" % (src.name, src.shape.typecode))
+ continue
+
+ stI = src.flux.I
+ # get l,m NCP position -- either from tag, or compute
+ lm = getattr(src, '_lm_ncp', None)
+ if lm:
+ if isinstance(lm, (tuple, list)) and len(lm) == 2:
+ l, m = lm
+ else:
+ dprint(0,
+ "warning: skipping source '%s' because its _lm_ncp attribute is malformed (tuple of 2 values expected)" % src.name)
+ continue
+ else:
+ l, m = radec_to_lm_ncp(ra0, dec0, src.pos.ra, src.pos.dec)
+
+ # update source count
+ nsrc += 1
+ # generate source id
+ src_id = getattr(src, 'newstar_id', None)
+ if src_id is None:
+ src_id = max_id = max_id + 1
+
+        # encode position, flux, identifier -- also, convert flux from Jy to WU (1WU=5mJy)
+ struct.pack_into('fffi', mdl, 0, stI / 0.005, l, m, src_id)
+
+ # encode fractional polarization
+ struct.pack_into('fff', mdl, 16, *[getattr(src.flux, stokes, 0.0) / stI for stokes in "QUV"])
+
+ ## encode flag & type bits
+ ## Flag: bit 0= extended; bit 1= Q|U|V <>0 and no longer used according to Wim
+ ## Type: bit 0= clean component; bit 3= beamed
+ beamed = getattr(src, 'flux_intrinsic', False) or getattr(src, 'newstar_beamed', False)
+ struct.pack_into('BB', mdl, 52,
+ 1 if src.shape else 0,
+ (1 if getattr(src, 'newstar_cc', False) else 0) | (8 if beamed else 0))
+
+ ### extended source parameters
+ if src.shape:
+ ## the procedure is NMOEXF in nscan/nmoext.for
+ R0 = math.cos(src.shape.pa)
+ R1 = -math.sin(src.shape.pa)
+ R2 = (.5 * src.shape.ex) ** 2
+ R3 = (.5 * src.shape.ey) ** 2
+ ex = R2 * R1 * R1 + R3 * R0 * R0
+ ey = R2 * R0 * R0 + R3 * R1 * R1
+ pa = 2 * (R2 - R3) * R0 * R1
+ struct.pack_into('fff', mdl, 28, ex, ey, pa)
+
+ ### spectral index
+ if isinstance(src.spectrum, ModelClasses.SpectralIndex):
+ struct.pack_into('f', mdl, 40, src.spectrum.spi)
+
+ if isinstance(src.flux, ModelClasses.PolarizationWithRM):
+ struct.pack_into('f', mdl, 44, src.flux.rm)
+
+ mdl.tofile(ff)
+
+ # update MDH header with the new number of sources
+ struct.pack_into('i', mdh, 20, nsrc)
+ ff.seek(512)
+ mdh.tofile(ff)
+ ff.close()
+ dprintf(1, "wrote %d sources to file %s\n", nsrc, filename)
+
+
+Tigger.Models.Formats.registerFormat("NEWSTAR", load, "NEWSTAR model file", (".mdl", ".MDL"), export_func=save)
diff --git a/Tigger/Models/Formats/PyBDSMGaul.py b/Tigger/Models/Formats/PyBDSMGaul.py
index dc79685..3742866 100644
--- a/Tigger/Models/Formats/PyBDSMGaul.py
+++ b/Tigger/Models/Formats/PyBDSMGaul.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,15 +24,11 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys,re
+import re
+import sys
-import Kittens.utils
-
-from Tigger.Models import ModelClasses
-from Tigger.Models import SkyModel
-from Tigger import Coordinates
import Tigger.Models.Formats
-from Tigger.Models.Formats import dprint,dprintf,ASCII
+from Tigger.Models.Formats import dprint, ASCII
"""Loads a PyBDSM-format .gaul file. Gaul files are essentially ASCII tables with a very specific naming convention."""
@@ -42,56 +38,57 @@
# E_Isl_Total_flux Isl_rms Isl_mean Resid_Isl_rms Resid_Isl_mean S_Code
format_mapping = dict(
- Gaus_id="name",
- RA="ra_d",E_RA="ra_err_d",DEC="dec_d",E_DEC="dec_err_d",
- Total_flux="i",E_Total_flux="i_err",
- Total_Q="q",E_Total_Q="q_err",
- Total_U="u",E_Total_U="u_err",
- Total_V="v",E_Total_V="v_err",
- DC_Maj="emaj_d",DC_Min="emin_d",DC_PA="pa_d",
- E_DC_Maj="emaj_err_d",E_DC_Min="emin_err_d",E_DC_PA="pa_err_d",
- SpI="spi",Spec_Indx="spi",E_Spec_Indx="spi_err",
- S_Code=":str:_pybdsm_S_Code"
-);
+ Gaus_id="name",
+ RA="ra_d", E_RA="ra_err_d", DEC="dec_d", E_DEC="dec_err_d",
+ Total_flux="i", E_Total_flux="i_err",
+ Total_Q="q", E_Total_Q="q_err",
+ Total_U="u", E_Total_U="u_err",
+ Total_V="v", E_Total_V="v_err",
+ DC_Maj="emaj_d", DC_Min="emin_d", DC_PA="pa_d",
+ E_DC_Maj="emaj_err_d", E_DC_Min="emin_err_d", E_DC_PA="pa_err_d",
+ SpI="spi", Spec_Indx="spi", E_Spec_Indx="spi_err",
+ S_Code=":str:_pybdsm_S_Code"
+)
+
+def load(filename, freq0=None, **kw):
+ """Imports a gaul table
+ The 'freq0' argument supplies a default reference frequency (if one is not contained in the file.)
+    If 'center_on_brightest' is True, the model field center will be set to the brightest source.
+ 'min_extent' is minimal source extent (in radians), above which a source will be treated as a Gaussian rather than a point component.
+ """
+ srclist = []
+ id = None
+ dprint(1, "importing PyBDSM gaul/srl file", filename)
+ format = {}
+ extension = filename.split(".")[-1]
+ if extension == "srl":
+ format_mapping['Source_id'] = format_mapping.pop('Gaus_id')
+ id = "Source_id"
+ # look for format string and reference freq, and build up format dict
+ for line in file(filename):
+ m = re.match("# Reference frequency .*?([0-9.eE+-]+)\s*Hz", line)
+ if m:
+ freq0 = kw['freq0'] = freq0 or float(m.group(1))
+ dprint(2, "found reference frequency %g" % freq0)
+ elif re.match("#(\s*[\w:]+\s+)+", line) and line.find(id if id else "Gaus_id") > 0:
+ dprint(2, "found format string", line)
+ fields = dict([(name, i) for i, name in enumerate(line[1:].split())])
+ # map known fields to their ASCII equivalents, the rest copy as custom float attributes with
+ # a "pybdsm_" prefix
+ for i, name in enumerate(line[1:].split()):
+ if name in format_mapping:
+ dprint(2, "Field", format_mapping[name], name, "is column", i)
+ format[format_mapping[name]] = i
+ else:
+ format[":float:_pybdsm_%s" % name] = i
+ if format and freq0:
+ break
+ if not format:
+ raise ValueError, "this .gaul file does not appear to contain a format string"
+ # call ASCII.load() function now that we have the format dict
+ kw['format'] = format
+ return ASCII.load(filename, **kw)
-def load (filename, freq0=None,**kw):
- """Imports a gaul table
- The 'freq0' argument supplies a default reference frequency (if one is not contained in the file.)
- If 'center_on_brightest' is True, the mpodel field center will be set to the brightest source.
- 'min_extent' is minimal source extent (in radians), above which a source will be treated as a Gaussian rather than a point component.
- """
- srclist = [];
- id = None
- dprint(1,"importing PyBDSM gaul/srl file",filename);
- format = {};
- extension = filename.split(".")[-1]
- if extension == "srl":
- format_mapping['Source_id'] = format_mapping.pop('Gaus_id')
- id = "Source_id"
- # look for format string and reference freq, and build up format dict
- for line in file(filename):
- m = re.match("# Reference frequency .*?([0-9.eE+-]+)\s*Hz",line);
- if m:
- freq0 = kw['freq0'] = freq0 or float(m.group(1));
- dprint(2,"found reference frequency %g"%freq0);
- elif re.match("#(\s*[\w:]+\s+)+",line) and line.find(id if id else "Gaus_id") > 0:
- dprint(2,"found format string",line);
- fields = dict([ (name,i) for i,name in enumerate(line[1:].split()) ]);
- # map known fields to their ASCII equivalents, the rest copy as custom float attributes with
- # a "pybdsm_" prefix
- for i,name in enumerate(line[1:].split()):
- if name in format_mapping:
- dprint(2,"Field",format_mapping[name],name,"is column",i)
- format[format_mapping[name]] = i;
- else:
- format[":float:_pybdsm_%s"%name] = i;
- if format and freq0:
- break;
- if not format:
- raise ValueError,"this .gaul file does not appear to contain a format string"
- # call ASCII.load() function now that we have the format dict
- kw['format'] = format;
- return ASCII.load(filename,**kw)
-Tigger.Models.Formats.registerFormat("Gaul",load,"PyBDSM .gaul/.srl file",(".gaul",".srl",));
+Tigger.Models.Formats.registerFormat("Gaul", load, "PyBDSM .gaul/.srl file", (".gaul", ".srl",))
diff --git a/Tigger/Models/Formats/__init__.py b/Tigger/Models/Formats/__init__.py
index 9949740..ea9e3a9 100644
--- a/Tigger/Models/Formats/__init__.py
+++ b/Tigger/Models/Formats/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,97 +24,106 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import Kittens.utils
-import os.path
-import sys
import traceback
-_verbosity = Kittens.utils.verbosity(name="lsmformats");
-dprint = _verbosity.dprint;
-dprintf = _verbosity.dprintf;
-
-Formats = {};
-_FormatList = [];
-_FormatsInitialized = False;
-
-def _initFormats ():
- """Initializes all known formats by importing their modules""";
- global _FormatsInitialized;
- if not _FormatsInitialized:
- for format in [ "ModelHTML","ASCII","BBS","NEWSTAR","AIPSCC","AIPSCCFITS","PyBDSMGaul" ]:
- try:
- __import__(format,globals(),locals());
- except:
- traceback.print_exc();
- print "Error loading support for format '%s', see above. Format will not be available."%format;
- _FormatsInitialized = True;
-
-def registerFormat (name,import_func,doc,extensions,export_func=None):
- """Registers an external format, with an import function""";
- global Formats;
- Formats[name] = (import_func,export_func,doc,extensions);
- _FormatList.append(name);
-
-def getFormat (name):
- """Gets file format by name. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise.""";
- _initFormats();
- if name not in Formats:
- return None,None,None,None;
- import_func,export_func,doc,extensions = Formats[name];
- return name,import_func,export_func,doc;
-
-def getFormatExtensions (name):
- """Gets file format by name. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise.""";
- _initFormats();
- if name not in Formats:
- return None;
- import_func,export_func,doc,extensions = Formats[name];
- return extensions;
-
-def determineFormat (filename):
- """Tries to determine file format by filename. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise.""";
- _initFormats();
- for name,(import_func,export_func,doc,extensions) in Formats.iteritems():
- for ext in extensions:
- if filename.endswith(ext):
- return name,import_func,export_func,doc;
- return None,None,None,None;
-
-def listFormats ():
- _initFormats();
- return _FormatList;
-
-def listFormatsFull ():
- _initFormats();
- return [ (name,Formats[name]) for name in _FormatList ];
-
-def resolveFormat (filename,format):
- """Helper function, resolves format/filename arguments to a format tuple""";
- _initFormats();
- if format:
- name,import_func,export_func,doc = getFormat(format);
- if not import_func:
- raise TypeError("Unknown model format '%s'"%format);
- else:
- name,import_func,export_func,doc = determineFormat(filename);
- if not import_func:
- raise TypeError("Cannot determine model format from filename '%s'"%filename);
- return name,import_func,export_func,doc;
-
+import Kittens.utils
+
+_verbosity = Kittens.utils.verbosity(name="lsmformats")
+dprint = _verbosity.dprint
+dprintf = _verbosity.dprintf
+
+Formats = {}
+_FormatList = []
+_FormatsInitialized = False
+
+
+def _initFormats():
+ """Initializes all known formats by importing their modules"""
+ global _FormatsInitialized
+ if not _FormatsInitialized:
+ for format in ["ModelHTML", "ASCII", "BBS", "NEWSTAR", "AIPSCC", "AIPSCCFITS", "PyBDSMGaul"]:
+ try:
+ __import__(format, globals(), locals())
+ except:
+ traceback.print_exc()
+ print "Error loading support for format '%s', see above. Format will not be available." % format
+ _FormatsInitialized = True
+
+
+def registerFormat(name, import_func, doc, extensions, export_func=None):
+ """Registers an external format, with an import function"""
+ global Formats
+ Formats[name] = (import_func, export_func, doc, extensions)
+ _FormatList.append(name)
+
+
+def getFormat(name):
+ """Gets file format by name. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise."""
+ _initFormats()
+ if name not in Formats:
+ return None, None, None, None
+ import_func, export_func, doc, extensions = Formats[name]
+ return name, import_func, export_func, doc
+
+
+def getFormatExtensions(name):
+ """Gets file format by name. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise."""
+ _initFormats()
+ if name not in Formats:
+ return None
+ import_func, export_func, doc, extensions = Formats[name]
+ return extensions
+
+
+def determineFormat(filename):
+ """Tries to determine file format by filename. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise."""
+ _initFormats()
+ for name, (import_func, export_func, doc, extensions) in Formats.iteritems():
+ for ext in extensions:
+ if filename.endswith(ext):
+ return name, import_func, export_func, doc
+ return None, None, None, None
+
+
+def listFormats():
+ _initFormats()
+ return _FormatList
+
+
+def listFormatsFull():
+ _initFormats()
+ return [(name, Formats[name]) for name in _FormatList]
+
+
+def resolveFormat(filename, format):
+ """Helper function, resolves format/filename arguments to a format tuple"""
+ _initFormats()
+ if format:
+ name, import_func, export_func, doc = getFormat(format)
+ if not import_func:
+ raise TypeError("Unknown model format '%s'" % format)
+ else:
+ name, import_func, export_func, doc = determineFormat(filename)
+ if not import_func:
+ raise TypeError("Cannot determine model format from filename '%s'" % filename)
+ return name, import_func, export_func, doc
+
+
# provide some convenience methods
-def load (filename,format=None,verbose=True):
- """Loads a sky model."""
- name,import_func,export_func,doc = resolveFormat(filename,format);
- if not import_func:
- raise TypeError("Unknown model format '%s'"%format);
- if verbose:
- print "Loading %s: %s"%(filename,doc);
- return import_func(filename);
-
-def save (model,filename,format=None,verbose=True):
- """Saves a sky model."""
- name,import_func,export_func,doc = resolveFormat(filename,format);
- if verbose:
- print "Saving %s: %s"%(filename,doc);
- return export_func(model,filename);
+def load(filename, format=None, verbose=True):
+ """Loads a sky model."""
+ name, import_func, export_func, doc = resolveFormat(filename, format)
+ if not import_func:
+ raise TypeError("Unknown model format '%s'" % format)
+ if verbose:
+ print "Loading %s: %s" % (filename, doc)
+ return import_func(filename)
+
+
+def save(model, filename, format=None, verbose=True):
+ """Saves a sky model."""
+ name, import_func, export_func, doc = resolveFormat(filename, format)
+ if verbose:
+ print "Saving %s: %s" % (filename, doc)
+ return export_func(model, filename)
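
registerFormat() is how each of the format modules above hooks into this registry, and an external format could plug in the same way. A minimal sketch with hypothetical my_load/my_save stubs (the format name, description and .mymodel extension are illustrative):

    import Tigger.Models.Formats as Formats

    def my_load(filename, **kw):
        # stub: parse 'filename' and return a SkyModel instance
        raise NotImplementedError

    def my_save(model, filename, **kw):
        # stub: write model.sources out in the custom format
        raise NotImplementedError

    Formats.registerFormat("MyFormat", my_load, "my custom sky model format",
                           (".mymodel",), export_func=my_save)
    # from here on, Formats.load("field.mymodel") resolves to my_load via determineFormat()
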
diff --git a/Tigger/Models/ModelClasses.py b/Tigger/Models/ModelClasses.py
index 0e4d311..c0b8111 100644
--- a/Tigger/Models/ModelClasses.py
+++ b/Tigger/Models/ModelClasses.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,413 +24,444 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+import copy
import math
-import os.path
+
import numpy
-import copy
+import os.path
from Tigger import startup_dprint
-startup_dprint(1,"starting ModelClasses");
-
-DEG = 180/math.pi;
-
-AtomicTypes = dict(bool=bool,int=int,float=float,complex=complex,str=str,list=list,tuple=tuple,dict=dict,NoneType=lambda x:None);
-
-class ModelItem (object):
- """ModelItem is a base class for all model items. ModelItem provides functions
- for saving, loading, and initializing items, using class attributes that describe the
- item's structure.
- A ModelItem has a number of named attributes (both mandatory and optional), which are
- sufficient to fully describe the item.
- A ModelItem is constructed by specifying its attribute values. Mandatory attributes are
- passed as positional arguments to the constructor, while optional attributes are passed
- as keyword arguments.
- 'mandatory_attrs' is a class data member that provides a list of mandatory attributes.
- 'optional_attrs' is a class data member that provides a dict of optional attributes and their
- default values (i.e. their value when missing). Subclasses are expected to redefine these
- attributes.
- """;
-
- # list of mandatory item attributes
- mandatory_attrs = [];
- # dict of optional item attributes (key is name, value is default value)
- optional_attrs = {};
- # True is arbitrary extra attributes are allowed
- allow_extra_attrs = False;
- # dict of rendertags for attributes. Default is to render ModelItems with the "A" tag,
- # and atomic attributes with the "TD" tag
- attr_rendertag = {};
- # dict of verbosities for attributes. If an entry is present for a given attribute, then
- # the attribute's text representation will be rendered within its tags
- attr_verbose = {};
-
- def __init__ (self,*args,**kws):
- """The default ModelItem constructor treats its positional arguments as a list of
- mandatory attributes, and its keyword arguments as optional attributes""";
- # check for argument errors
- if len(args) < len(self.mandatory_attrs):
- raise TypeError,"too few arguments in constructor of "+self.__class__.__name__;
- if len(args) > len(self.mandatory_attrs):
- raise TypeError,"too many arguments in constructor of "+self.__class__.__name__;
- # set mandatory attributes from argument list
- for attr,value in zip(self.mandatory_attrs,args):
- if not isinstance(value,AllowedTypesTuple):
- raise TypeError,"invalid type %s for attribute %s (class %s)"%(type(value).__name__,attr,self.__class__.__name__);
- setattr(self,attr,value);
- # set optional attributes from keywords
- for kw,default in self.optional_attrs.iteritems():
- value = kws.pop(kw,default);
- if not isinstance(value,AllowedTypesTuple):
- raise TypeError,"invalid type %s for attribute %s (class %s)"%(type(value).__name__,kw,self.__class__.__name__);
- setattr(self,kw,value);
- # set extra attributes, if any are left
- self._extra_attrs = set();
- if self.allow_extra_attrs:
- for kw,value in kws.iteritems():
- if not isinstance(value,AllowedTypesTuple):
- raise TypeError,"invalid type %s for attribute %s (class %s)"%(type(value).__name__,kw,self.__class__.__name__);
- self.setAttribute(kw,value);
- elif kws:
- raise TypeError,"unknown parameters %s in constructor of %s"%(','.join(kws.keys()),self.__class__.__name__);
- # other init
- self._signaller = None;
- self._connections = set();
-
- def enableSignals (self):
- """Enables Qt signals for this object.""";
- import PyQt4.Qt;
- self._signaller = PyQt4.Qt.QObject();
-
- def signalsEnabled (self):
- return bool(self._signaller);
-
- def connect (self,signal_name,receiver,reconnect=False):
- """Connects SIGNAL from object to specified receiver slot. If reconnect is True, allows duplicate connections.""";
- if not self._signaller:
- raise RuntimeError,"ModelItem.connect() called before enableSignals()";
- import PyQt4.Qt;
- if reconnect or (signal_name,receiver) not in self._connections:
- self._connections.add((signal_name,receiver));
- PyQt4.Qt.QObject.connect(self._signaller,PyQt4.Qt.SIGNAL(signal_name),receiver);
-
- def emit (self,signal_name,*args):
- """Emits named SIGNAL from this object .""";
- if not self._signaller:
- raise RuntimeError,"ModelItem.emit() called before enableSignals()";
- import PyQt4.Qt;
- self._signaller.emit(PyQt4.Qt.SIGNAL(signal_name),*args);
-
- def registerClass (classobj):
- if not isinstance(classobj,type):
- raise TypeError,"registering invalid class object: %s"%classobj;
- globals()[classobj.__name__] = classobj;
- AllowedTypes[classobj.__name__] = classobj;
- AllowedTypesTuple = tuple(AllowedTypes.itervalues());
- registerClass = classmethod(registerClass);
-
- def setAttribute (self,attr,value):
- if attr not in self.mandatory_attrs and attr not in self.optional_attrs:
- self._extra_attrs.add(attr);
- setattr(self,attr,value);
-
- def removeAttribute (self,attr):
- if hasattr(self,attr):
- delattr(self,attr);
- self._extra_attrs.discard(attr);
-
- def getExtraAttributes (self):
- """Returns list of extra attributes, as (attr,value) tuples""";
- return [ (attr,getattr(self,attr)) for attr in self._extra_attrs ];
-
- def getAttributes (self):
- """Returns list of all attributes (mandatory+optional+extra), as (attr,value) tuples""";
- attrs = [ (attr,getattr(self,attr)) for attr in self.mandatory_attrs ];
- for attr,default in self.optional_attrs.iteritems():
- val = getattr(self,attr,default);
- if val != default:
- attrs.append((attr,val));
- attrs += [ (attr,getattr(self,attr)) for attr in self._extra_attrs ];
- return attrs;
-
- def __copy__ (self):
- """Returns copy of object. Copies all attributes.""";
- attrs = self.optional_attrs.copy();
- attrs.update(self.getExtraAttributes());
- return self.__class__( *[ getattr(self,attr) for attr in self.mandatory_attrs],**attrs);
-
- def __deepcopy__ (self,memodict):
- """Returns copy of object. Copies all attributes.""";
- attrs = self.optional_attrs.copy();
- attrs.update(self.getExtraAttributes());
- attrs = copy.deepcopy(attrs,memodict);
- return self.__class__( *[ copy.deepcopy(getattr(self,attr),memodict) for attr in self.mandatory_attrs],**attrs);
-
- def copy (self,deep=True):
- if deep:
- return copy.deepcopy(self);
- else:
- return __copy__(self);
-
- def strAttributes (self,sep=",",label=True,
- float_format="%.2g",complex_format="%.2g%+.2gj"):
- """Renders attributes as string. Child classes may redefine this to make a better string representation.
- If label=True, uses "attrname=value", else uses "value".
- 'sep' specifies a separator.
- """;
- fields = [];
- for attr,val in self.getAttributes():
- ss = (label and "%s="%attr) or "";
- if isinstance(val,(float,int)):
- ss += float_format%val;
- elif isinstance(val,complex):
- ss += complex_format%val;
- else:
- ss += str(val);
- fields.append(ss);
- return sep.join(fields);
-
- def strDesc (self,**kw):
- """Returns string describing the object, used in GUIs and such. Default implementation calls strAttributes()."""
- return strAttributes(**kw);
-
- def _resolveTags (self,tags,attr=None):
- """helper function called from renderMarkup() and renderAttrMarkup() below to
- figure out which HTML tags to enclose a value in. Return value is tuple of (tag,endtag,rem_tags), where
- tag is the HTML tag to use (or None for default, usually "A"), endtag is the closing tag (including <> and whitespace, if any),
- and rem_tags is to be passed to child items' resolveMarkup() """;
- # figure out enclosing tag
- if not tags:
- tag,tags = None,None; # use default
- elif isinstance(tags,str):
- tag,tags = tags,None; # one tag supplied, use that here and use defaults for sub-items
- elif isinstance(tags,(list,tuple)):
- tag,tags = tags[0],tags[1:]; # stack of tags supplied: use first here, pass rest to sub-items
- else:
- raise ValueError,"invalid 'tags' parameter of type "+str(type(tags));
- # if tag is None, use default
- tag = tag or self.attr_rendertag.get(attr,None) or "A";
- if tag.endswith('\n'):
- tag = tag[:-1];
- endtag = "%s>\n"%tag;
- else:
- endtag = "%s> "%tag;
- return tag,endtag,tags;
-
- def renderMarkup (self,tags=None,attrname=None):
- """Makes a markup string corresponding to the model item.
- 'tags' is the HTML tag to use.
- If 'verbose' is not None, a text representation of the item (using str()) will be included
- as HTML text between the opening and closing tags.
- """;
- tag,endtag,tags = self._resolveTags(tags,attrname);
- # opening tag
- markup = "<%s mdltype=%s "%(tag,type(self).__name__);
- if attrname is not None:
- markup += "mdlattr=\"%s\" "%attrname;
- markup +=">";
- # render attrname as comment
- if attrname:
- if tag == "TR":
- markup += "%s | "%attrname;
- else:
- markup += "%s: "%attrname;
- # write mandatory attributes
- for attr in self.mandatory_attrs:
- markup += self.renderAttrMarkup(attr,getattr(self,attr),tags=tags,mandatory=True);
- # write optional attributes only wheh non-default
- for attr,default in sorted(self.optional_attrs.iteritems()):
- val = getattr(self,attr,default);
- if val != default:
- markup += self.renderAttrMarkup(attr,val,tags=tags);
- # write extra attributes
- for attr in self._extra_attrs:
- markup += self.renderAttrMarkup(attr,getattr(self,attr),tags=tags);
- # closing tag
- markup += endtag;
- return markup;
-
- numpy_int_types = tuple([
- getattr(numpy,"%s%d"%(t,d)) for t in "int","uint" for d in 8,16,32,64
- if hasattr(numpy,"%s%d"%(t,d))
- ]);
- numpy_float_types = tuple([
- getattr(numpy,"float%d"%d) for d in 32,64,96,128
- if hasattr(numpy,"float%d"%d)
- ]);
-
- def renderAttrMarkup (self,attr,value,tags=None,verbose=None,mandatory=False):
- # render ModelItems recursively via renderMarkup() above
- if isinstance(value,ModelItem):
- return value.renderMarkup(tags,attrname=(not mandatory and attr) or None);
- # figure out enclosing tags
- tag,endtag,tags = self._resolveTags(tags,attr);
- # convert numpy types to float or complexes
- if isinstance(value,self.numpy_int_types):
- value = int(value);
- elif isinstance(value,self.numpy_float_types):
- value = float(value);
- elif numpy.iscomplexobj(value):
- value = complex(value);
- # render opening tags
- markup = "<%s mdltype=%s "%(tag,type(value).__name__);
- if not mandatory:
- markup += "mdlattr=\"%s\" "%attr;
- # if rendering table row, use TD to render comments
- if verbose is None:
- verbose = attr; # and self.attr_verbose.get(attr);
- if tag == "TR":
- comment = "%s | ";
- else:
- comment = "%s ";
- # render lists or tuples iteratively
- if isinstance(value,(list,tuple)):
- markup += ">";
- if verbose:
- markup += comment%(verbose+":");
- for i,item in enumerate(value):
- markup += self.renderAttrMarkup(str(i),item,mandatory=True,tags=tags);
- # render dicts iteratively
- elif isinstance(value,dict):
- markup += ">";
- if verbose:
- markup += comment%(verbose+":");
- for key,item in sorted(value.iteritems()):
- markup += self.renderAttrMarkup(key,item,tags=tags);
- # render everything else inline
- else:
- if isinstance(value,str):
- markup += "mdlval=\"'%s'\">"%value.replace("\"","\\\"").replace("'","\\'");
- else:
- markup += "mdlval=\"%s\">"%repr(value);
- if verbose is attr:
- markup += comment%':'.join((attr,str(value)));
- else:
- markup += comment%''.join((verbose,str(value)));
- markup += endtag;
- return markup;
-
-def _deg_to_dms (x,prec=0.01):
- """Converts x (in degrees) into d,m,s tuple, where d and m are ints.
- prec gives the precision, in arcseconds."""
- mins,secs = divmod(round(x*3600/prec)*prec,60);
- mins = int(mins);
- degs,mins = divmod(mins,60);
- return degs,mins,secs;
-
-class Position (ModelItem):
- mandatory_attrs = [ "ra","dec" ];
- optional_attrs = dict(ra_err=None,dec_err=None);
-
- @staticmethod
- def ra_hms_static (rad,scale=12,prec=0.01):
- """Returns RA as tuple of (h,m,s)""";
- # convert negative values
- while rad < 0:
- rad += 2*math.pi;
- # convert to hours
- rad *= scale/math.pi;
- return _deg_to_dms(rad,prec);
-
- def ra_hms (self,prec=0.01):
- return self.ra_hms_static(self.ra,scale=12,prec=prec);
-
- def ra_dms (self,prec=0.01):
- return self.ra_hms_static(self.ra,scale=180,prec=prec);
-
- @staticmethod
- def dec_dms_static (rad,prec=0.01):
- return Position.dec_sdms_static(rad,prec)[1:];
-
- @staticmethod
- def dec_sdms_static (rad,prec=0.01):
- """Returns Dec as tuple of (sign,d,m,s). Sign is "+" or "-".""";
- sign = "-" if rad<0 else "+";
- d,m,s = _deg_to_dms(abs(rad)*DEG,prec);
- return (sign,d,m,s);
-
- def dec_sdms (self,prec=0.01):
- return self.dec_sdms_static(self.dec,prec);
-
-class Flux (ModelItem):
- mandatory_attrs = [ "I" ];
- optional_attrs = dict(I_err=None);
- def rescale (self,scale):
- self.I *= scale;
-
-class Polarization (Flux):
- mandatory_attrs = Flux.mandatory_attrs + [ "Q","U","V" ];
- optional_attrs = dict(I_err=None,Q_err=None,U_err=None,V_err=None);
- def rescale (self,scale):
- for stokes in "IQUV":
- setattr(self,stokes,getattr(self,stokes)*scale);
-
-class PolarizationWithRM (Polarization):
- mandatory_attrs = Polarization.mandatory_attrs + [ "rm","freq0" ];
- optional_attrs = dict(Polarization.optional_attrs,rm_err=None)
-
-class Spectrum (ModelItem):
- """The Spectrum class is an abstract representation of spectral information. The base implementation corresponds
- to a flat spectrum.
- """;
- def normalized_intensity (self,freq):
- """Returns the normalized intensity for a given frequency,normalized to unity at the reference frequency (if any)"""
- return 1;
-
-class SpectralIndex (Spectrum):
- mandatory_attrs = [ "spi","freq0" ];
- optional_attrs = dict(spi_err=None);
- def normalized_intensity (self,freq):
- """Returns the normalized intensity for a given frequency, normalized to unity at the reference frequency (if any)"""
- if isinstance(self.spi,(list,tuple)):
- spi = self.spi[0];
- logfreq = numpy.log(freq/self.freq0);
- for i,x in enumerate(self.spi[1:]):
- spi = spi + x*(logfreq**(i+1));
- else:
- spi = self.spi;
- return (freq/self.freq0)**spi;
-
-class Shape (ModelItem):
- """Abstract base class for a source's brightness distribution.
- The ex/ey/pa attributes give the overall shape of the source."""
- mandatory_attrs = [ "ex","ey","pa" ];
- optional_attrs = dict(ex_err=None,ey_err=None,pa_err=None);
- def getShape (self):
- return self.ex,self.ey,self.pa
- def getShapeErr (self):
- err = [ getattr(self,a+'_err',None) for a in self.mandatory_attrs ]
- if all([ a is None for a in err ]):
- return None
- return tuple(err)
-
-class Gaussian (Shape):
- typecode = "Gau";
- def strDesc (self,delimiters=('"',"x","@","deg"),**kw):
- return """%.2g%s%s%.2g%s%s%d%s"""%(self.ex*DEG*3600,delimiters[0],delimiters[1],self.ey*DEG*3600,delimiters[0],
- delimiters[2],round(self.pa*DEG),delimiters[3]);
- def strDescErr (self,delimiters=('"',"x","@","deg"),**kw):
- err = self.getShapeErr();
- return err and """%.2g%s%s%.2g%s%s%d%s"""%(err[0]*DEG*3600,delimiters[0],delimiters[1],err[1]*DEG*3600,delimiters[0],
- delimiters[2],round(err[2]*DEG),delimiters[3]);
-
-
-class FITSImage (Shape):
- typecode = "FITS";
- mandatory_attrs = Shape.mandatory_attrs + [ "filename","nx","ny" ];
- optional_attrs = dict(pad=2);
- def strDesc (self,**kw):
- return """%s %dx%d"""%(os.path.basename(self.filename),self.nx,self.ny);
-
-startup_dprint(1,"end of class defs");
+
+startup_dprint(1, "starting ModelClasses")
+
+DEG = 180 / math.pi
+
+AtomicTypes = dict(bool=bool, int=int, float=float, complex=complex, str=str, list=list, tuple=tuple, dict=dict,
+ NoneType=lambda x: None)
+
+
+class ModelItem(object):
+ """ModelItem is a base class for all model items. ModelItem provides functions
+ for saving, loading, and initializing items, using class attributes that describe the
+ item's structure.
+ A ModelItem has a number of named attributes (both mandatory and optional), which are
+ sufficient to fully describe the item.
+ A ModelItem is constructed by specifying its attribute values. Mandatory attributes are
+ passed as positional arguments to the constructor, while optional attributes are passed
+ as keyword arguments.
+ 'mandatory_attrs' is a class data member that provides a list of mandatory attributes.
+ 'optional_attrs' is a class data member that provides a dict of optional attributes and their
+ default values (i.e. their value when missing). Subclasses are expected to redefine these
+ attributes.
+ """
+
+ # list of mandatory item attributes
+ mandatory_attrs = []
+ # dict of optional item attributes (key is name, value is default value)
+ optional_attrs = {}
+    # True if arbitrary extra attributes are allowed
+ allow_extra_attrs = False
+ # dict of rendertags for attributes. Default is to render ModelItems with the "A" tag,
+ # and atomic attributes with the "TD" tag
+ attr_rendertag = {}
+ # dict of verbosities for attributes. If an entry is present for a given attribute, then
+ # the attribute's text representation will be rendered within its tags
+ attr_verbose = {}
+
+ def __init__(self, *args, **kws):
+ """The default ModelItem constructor treats its positional arguments as a list of
+ mandatory attributes, and its keyword arguments as optional attributes"""
+ # check for argument errors
+ if len(args) < len(self.mandatory_attrs):
+ raise TypeError, "too few arguments in constructor of " + self.__class__.__name__
+ if len(args) > len(self.mandatory_attrs):
+ raise TypeError, "too many arguments in constructor of " + self.__class__.__name__
+ # set mandatory attributes from argument list
+ for attr, value in zip(self.mandatory_attrs, args):
+ if not isinstance(value, AllowedTypesTuple):
+ raise TypeError, "invalid type %s for attribute %s (class %s)" % (
+ type(value).__name__, attr, self.__class__.__name__)
+ setattr(self, attr, value)
+ # set optional attributes from keywords
+ for kw, default in self.optional_attrs.iteritems():
+ value = kws.pop(kw, default)
+ if not isinstance(value, AllowedTypesTuple):
+ raise TypeError, "invalid type %s for attribute %s (class %s)" % (
+ type(value).__name__, kw, self.__class__.__name__)
+ setattr(self, kw, value)
+ # set extra attributes, if any are left
+ self._extra_attrs = set()
+ if self.allow_extra_attrs:
+ for kw, value in kws.iteritems():
+ if not isinstance(value, AllowedTypesTuple):
+ raise TypeError, "invalid type %s for attribute %s (class %s)" % (
+ type(value).__name__, kw, self.__class__.__name__)
+ self.setAttribute(kw, value)
+ elif kws:
+ raise TypeError, "unknown parameters %s in constructor of %s" % (
+ ','.join(kws.keys()), self.__class__.__name__)
+ # other init
+ self._signaller = None
+ self._connections = set()
+
+ def enableSignals(self):
+ """Enables Qt signals for this object."""
+ import PyQt4.Qt
+ self._signaller = PyQt4.Qt.QObject()
+
+ def signalsEnabled(self):
+ return bool(self._signaller)
+
+ def connect(self, signal_name, receiver, reconnect=False):
+ """Connects SIGNAL from object to specified receiver slot. If reconnect is True, allows duplicate connections."""
+ if not self._signaller:
+ raise RuntimeError, "ModelItem.connect() called before enableSignals()"
+ import PyQt4.Qt
+ if reconnect or (signal_name, receiver) not in self._connections:
+ self._connections.add((signal_name, receiver))
+ PyQt4.Qt.QObject.connect(self._signaller, PyQt4.Qt.SIGNAL(signal_name), receiver)
+
+ def emit(self, signal_name, *args):
+ """Emits named SIGNAL from this object ."""
+ if not self._signaller:
+ raise RuntimeError, "ModelItem.emit() called before enableSignals()"
+ import PyQt4.Qt
+ self._signaller.emit(PyQt4.Qt.SIGNAL(signal_name), *args)
+
+ def registerClass(classobj):
+ if not isinstance(classobj, type):
+ raise TypeError, "registering invalid class object: %s" % classobj
+ globals()[classobj.__name__] = classobj
+ AllowedTypes[classobj.__name__] = classobj
+ AllowedTypesTuple = tuple(AllowedTypes.itervalues())
+
+ registerClass = classmethod(registerClass)
+
+ def setAttribute(self, attr, value):
+ if attr not in self.mandatory_attrs and attr not in self.optional_attrs:
+ self._extra_attrs.add(attr)
+ setattr(self, attr, value)
+
+ def removeAttribute(self, attr):
+ if hasattr(self, attr):
+ delattr(self, attr)
+ self._extra_attrs.discard(attr)
+
+ def getExtraAttributes(self):
+ """Returns list of extra attributes, as (attr,value) tuples"""
+ return [(attr, getattr(self, attr)) for attr in self._extra_attrs]
+
+ def getAttributes(self):
+ """Returns list of all attributes (mandatory+optional+extra), as (attr,value) tuples"""
+ attrs = [(attr, getattr(self, attr)) for attr in self.mandatory_attrs]
+ for attr, default in self.optional_attrs.iteritems():
+ val = getattr(self, attr, default)
+ if val != default:
+ attrs.append((attr, val))
+ attrs += [(attr, getattr(self, attr)) for attr in self._extra_attrs]
+ return attrs
+
+ def __copy__(self):
+ """Returns copy of object. Copies all attributes."""
+ attrs = self.optional_attrs.copy()
+ attrs.update(self.getExtraAttributes())
+ return self.__class__(*[getattr(self, attr) for attr in self.mandatory_attrs], **attrs)
+
+ def __deepcopy__(self, memodict):
+ """Returns copy of object. Copies all attributes."""
+ attrs = self.optional_attrs.copy()
+ attrs.update(self.getExtraAttributes())
+ attrs = copy.deepcopy(attrs, memodict)
+ return self.__class__(*[copy.deepcopy(getattr(self, attr), memodict) for attr in self.mandatory_attrs],
+ **attrs)
+
+ def copy(self, deep=True):
+ if deep:
+ return copy.deepcopy(self)
+ else:
+ return self.__copy__()
+
+ def strAttributes(self, sep=",", label=True,
+ float_format="%.2g", complex_format="%.2g%+.2gj"):
+ """Renders attributes as string. Child classes may redefine this to make a better string representation.
+ If label=True, uses "attrname=value", else uses "value".
+ 'sep' specifies a separator.
+ """
+ fields = []
+ for attr, val in self.getAttributes():
+ ss = (label and "%s=" % attr) or ""
+ if isinstance(val, (float, int)):
+ ss += float_format % val
+ elif isinstance(val, complex):
+ ss += complex_format % val
+ else:
+ ss += str(val)
+ fields.append(ss)
+ return sep.join(fields)
+
+ def strDesc(self, **kw):
+ """Returns string describing the object, used in GUIs and such. Default implementation calls strAttributes()."""
+ return self.strAttributes(**kw)
+
+ def _resolveTags(self, tags, attr=None):
+ """helper function called from renderMarkup() and renderAttrMarkup() below to
+ figure out which HTML tags to enclose a value in. Return value is tuple of (tag,endtag,rem_tags), where
+ tag is the HTML tag to use (or None for default, usually "A"), endtag is the closing tag (including <> and whitespace, if any),
+ and rem_tags is to be passed to child items' resolveMarkup() """
+ # figure out enclosing tag
+ if not tags:
+ tag, tags = None, None; # use default
+ elif isinstance(tags, str):
+ tag, tags = tags, None; # one tag supplied, use that here and use defaults for sub-items
+ elif isinstance(tags, (list, tuple)):
+ tag, tags = tags[0], tags[1:]; # stack of tags supplied: use first here, pass rest to sub-items
+ else:
+ raise ValueError, "invalid 'tags' parameter of type " + str(type(tags))
+ # if tag is None, use default
+ tag = tag or self.attr_rendertag.get(attr, None) or "A"
+ if tag.endswith('\n'):
+ tag = tag[:-1]
+            endtag = "</%s>\n" % tag
+        else:
+            endtag = "</%s> " % tag
+ return tag, endtag, tags
+
+ def renderMarkup(self, tags=None, attrname=None):
+ """Makes a markup string corresponding to the model item.
+ 'tags' is the HTML tag to use.
+ If 'verbose' is not None, a text representation of the item (using str()) will be included
+ as HTML text between the opening and closing tags.
+ """
+ tag, endtag, tags = self._resolveTags(tags, attrname)
+ # opening tag
+ markup = "<%s mdltype=%s " % (tag, type(self).__name__)
+ if attrname is not None:
+ markup += "mdlattr=\"%s\" " % attrname
+ markup += ">"
+ # render attrname as comment
+ if attrname:
+ if tag == "TR":
+                markup += "<TD bgcolor=yellow>%s</TD> " % attrname
+            else:
+                markup += "<A>%s</A>: " % attrname
+ # write mandatory attributes
+ for attr in self.mandatory_attrs:
+ markup += self.renderAttrMarkup(attr, getattr(self, attr), tags=tags, mandatory=True)
+        # write optional attributes only when non-default
+ for attr, default in sorted(self.optional_attrs.iteritems()):
+ val = getattr(self, attr, default)
+ if val != default:
+ markup += self.renderAttrMarkup(attr, val, tags=tags)
+ # write extra attributes
+ for attr in self._extra_attrs:
+ markup += self.renderAttrMarkup(attr, getattr(self, attr), tags=tags)
+ # closing tag
+ markup += endtag
+ return markup
+
+ numpy_int_types = tuple([
+ getattr(numpy, "%s%d" % (t, d)) for t in "int", "uint" for d in 8, 16, 32, 64
+ if hasattr(numpy, "%s%d" % (t, d))
+ ])
+ numpy_float_types = tuple([
+ getattr(numpy, "float%d" % d) for d in 32, 64, 96, 128
+ if hasattr(numpy, "float%d" % d)
+ ])
+
+ def renderAttrMarkup(self, attr, value, tags=None, verbose=None, mandatory=False):
+ # render ModelItems recursively via renderMarkup() above
+ if isinstance(value, ModelItem):
+ return value.renderMarkup(tags, attrname=(not mandatory and attr) or None)
+ # figure out enclosing tags
+ tag, endtag, tags = self._resolveTags(tags, attr)
+ # convert numpy types to float or complexes
+ if isinstance(value, self.numpy_int_types):
+ value = int(value)
+ elif isinstance(value, self.numpy_float_types):
+ value = float(value)
+ elif numpy.iscomplexobj(value):
+ value = complex(value)
+ # render opening tags
+ markup = "<%s mdltype=%s " % (tag, type(value).__name__)
+ if not mandatory:
+ markup += "mdlattr=\"%s\" " % attr
+ # if rendering table row, use TD to render comments
+ if verbose is None:
+ verbose = attr; # and self.attr_verbose.get(attr)
+ if tag == "TR":
+            comment = "<TD bgcolor=yellow>%s</TD> "
+        else:
+            comment = "<A>%s</A> "
+ # render lists or tuples iteratively
+ if isinstance(value, (list, tuple)):
+ markup += ">"
+ if verbose:
+ markup += comment % (verbose + ":")
+ for i, item in enumerate(value):
+ markup += self.renderAttrMarkup(str(i), item, mandatory=True, tags=tags)
+ # render dicts iteratively
+ elif isinstance(value, dict):
+ markup += ">"
+ if verbose:
+ markup += comment % (verbose + ":")
+ for key, item in sorted(value.iteritems()):
+ markup += self.renderAttrMarkup(key, item, tags=tags)
+ # render everything else inline
+ else:
+ if isinstance(value, str):
+ markup += "mdlval=\"'%s'\">" % value.replace("\"", "\\\"").replace("'", "\\'")
+ else:
+ markup += "mdlval=\"%s\">" % repr(value)
+ if verbose is attr:
+ markup += comment % ':'.join((attr, str(value)))
+ else:
+ markup += comment % ''.join((verbose, str(value)))
+ markup += endtag
+ return markup
+
+
+def _deg_to_dms(x, prec=0.01):
+ """Converts x (in degrees) into d,m,s tuple, where d and m are ints.
+ prec gives the precision, in arcseconds."""
+ mins, secs = divmod(round(x * 3600 / prec) * prec, 60)
+ mins = int(mins)
+ degs, mins = divmod(mins, 60)
+ return degs, mins, secs
+
+
+class Position(ModelItem):
+ mandatory_attrs = ["ra", "dec"]
+ optional_attrs = dict(ra_err=None, dec_err=None)
+
+ @staticmethod
+ def ra_hms_static(rad, scale=12, prec=0.01):
+ """Returns RA as tuple of (h,m,s)"""
+ # convert negative values
+ while rad < 0:
+ rad += 2 * math.pi
+ # convert to hours
+ rad *= scale / math.pi
+ return _deg_to_dms(rad, prec)
+
+ def ra_hms(self, prec=0.01):
+ return self.ra_hms_static(self.ra, scale=12, prec=prec)
+
+ def ra_dms(self, prec=0.01):
+ return self.ra_hms_static(self.ra, scale=180, prec=prec)
+
+ @staticmethod
+ def dec_dms_static(rad, prec=0.01):
+ return Position.dec_sdms_static(rad, prec)[1:]
+
+ @staticmethod
+ def dec_sdms_static(rad, prec=0.01):
+ """Returns Dec as tuple of (sign,d,m,s). Sign is "+" or "-"."""
+ sign = "-" if rad < 0 else "+"
+ d, m, s = _deg_to_dms(abs(rad) * DEG, prec)
+ return (sign, d, m, s)
+
+ def dec_sdms(self, prec=0.01):
+ return self.dec_sdms_static(self.dec, prec)
+
+
+class Flux(ModelItem):
+ mandatory_attrs = ["I"]
+ optional_attrs = dict(I_err=None)
+
+ def rescale(self, scale):
+ self.I *= scale
+
+
+class Polarization(Flux):
+ mandatory_attrs = Flux.mandatory_attrs + ["Q", "U", "V"]
+ optional_attrs = dict(I_err=None, Q_err=None, U_err=None, V_err=None)
+
+ def rescale(self, scale):
+ for stokes in "IQUV":
+ setattr(self, stokes, getattr(self, stokes) * scale)
+
+
+class PolarizationWithRM(Polarization):
+ mandatory_attrs = Polarization.mandatory_attrs + ["rm", "freq0"]
+ optional_attrs = dict(Polarization.optional_attrs, rm_err=None)
+
+
+class Spectrum(ModelItem):
+ """The Spectrum class is an abstract representation of spectral information. The base implementation corresponds
+ to a flat spectrum.
+ """
+
+ def normalized_intensity(self, freq):
+        """Returns the normalized intensity for a given frequency, normalized to unity at the reference frequency (if any)"""
+ return 1
+
+
+class SpectralIndex(Spectrum):
+ mandatory_attrs = ["spi", "freq0"]
+ optional_attrs = dict(spi_err=None)
+
+ def normalized_intensity(self, freq):
+ """Returns the normalized intensity for a given frequency, normalized to unity at the reference frequency (if any)"""
+ if isinstance(self.spi, (list, tuple)):
+ spi = self.spi[0]
+ logfreq = numpy.log(freq / self.freq0)
+ for i, x in enumerate(self.spi[1:]):
+ spi = spi + x * (logfreq ** (i + 1))
+ else:
+ spi = self.spi
+ return (freq / self.freq0) ** spi
+
+
+class Shape(ModelItem):
+ """Abstract base class for a source's brightness distribution.
+ The ex/ey/pa attributes give the overall shape of the source."""
+ mandatory_attrs = ["ex", "ey", "pa"]
+ optional_attrs = dict(ex_err=None, ey_err=None, pa_err=None)
+
+ def getShape(self):
+ return self.ex, self.ey, self.pa
+
+ def getShapeErr(self):
+ err = [getattr(self, a + '_err', None) for a in self.mandatory_attrs]
+ if all([a is None for a in err]):
+ return None
+ return tuple(err)
+
+
+class Gaussian(Shape):
+ typecode = "Gau"
+
+ def strDesc(self, delimiters=('"', "x", "@", "deg"), **kw):
+ return """%.2g%s%s%.2g%s%s%d%s""" % (
+ self.ex * DEG * 3600, delimiters[0], delimiters[1], self.ey * DEG * 3600, delimiters[0],
+ delimiters[2], round(self.pa * DEG), delimiters[3])
+
+ def strDescErr(self, delimiters=('"', "x", "@", "deg"), **kw):
+ err = self.getShapeErr()
+ return err and """%.2g%s%s%.2g%s%s%d%s""" % (
+ err[0] * DEG * 3600, delimiters[0], delimiters[1], err[1] * DEG * 3600, delimiters[0],
+ delimiters[2], round(err[2] * DEG), delimiters[3])
+
+
+class FITSImage(Shape):
+ typecode = "FITS"
+ mandatory_attrs = Shape.mandatory_attrs + ["filename", "nx", "ny"]
+ optional_attrs = dict(pad=2)
+
+ def strDesc(self, **kw):
+ return """%s %dx%d""" % (os.path.basename(self.filename), self.nx, self.ny)
+
+
+startup_dprint(1, "end of class defs")
# populate dict of AllowedTypes with all classes defined so far
-globs = list(globals().iteritems());
+globs = list(globals().iteritems())
-AllowedTypes = dict(AtomicTypes.iteritems());
+AllowedTypes = dict(AtomicTypes.iteritems())
AllowedTypes['NoneType'] = type(None); # this must be a type, otherwise isinstance() doesn't work
-for name,val in globs:
- if isinstance(val,type):
- AllowedTypes[name] = val;
-AllowedTypesTuple = tuple(AllowedTypes.itervalues());
+for name, val in globs:
+ if isinstance(val, type):
+ AllowedTypes[name] = val
+AllowedTypesTuple = tuple(AllowedTypes.itervalues())
-startup_dprint(1,"end of ModelClasses");
+startup_dprint(1, "end of ModelClasses")
diff --git a/Tigger/Models/PlotStyles.py b/Tigger/Models/PlotStyles.py
index 431c587..956dd3c 100644
--- a/Tigger/Models/PlotStyles.py
+++ b/Tigger/Models/PlotStyles.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,105 +24,114 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import ModelClasses
import math
+import ModelClasses
+
# string used to indicate default value of an attribute
-DefaultValue = "default";
+DefaultValue = "default"
# string used to indicate "none" value of an attribute
-NoneValue = "none";
+NoneValue = "none"
# definitive list of style attributes
-StyleAttributes = [ "symbol","symbol_color","symbol_size","symbol_linewidth","label","label_color","label_size" ];
+StyleAttributes = ["symbol", "symbol_color", "symbol_size", "symbol_linewidth", "label", "label_color", "label_size"]
# dict of attribute labels (i.e. for menus, column headings and such)
-StyleAttributeLabels = dict(symbol="symbol",symbol_color="color",symbol_size="size",symbol_linewidth="line width",
- label="label",label_color="color",label_size="size");
+StyleAttributeLabels = dict(symbol="symbol", symbol_color="color", symbol_size="size", symbol_linewidth="line width",
+ label="label", label_color="color", label_size="size")
# dict of attribute types. Any attribute not in this dict is of type str.
-StyleAttributeTypes = dict(symbol_size=int,symbol_linewidth=int,label_size=int);
+StyleAttributeTypes = dict(symbol_size=int, symbol_linewidth=int, label_size=int)
# list of known colors
-ColorList = [ "black","blue","lightblue","green","lightgreen","cyan","red","orange red","purple","magenta","yellow","white" ];
-DefaultColor = "black";
+ColorList = ["black", "blue", "lightblue", "green", "lightgreen", "cyan", "red", "orange red", "purple", "magenta",
+ "yellow", "white"]
+DefaultColor = "black"
# dict and method to pick a contrasting color (i.e. suitable as background for specified color)
-ContrastColor = dict(white="#404040",yellow="#404040");
-DefaultContrastColor = "#B0B0B0";
+ContrastColor = dict(white="#404040", yellow="#404040")
+DefaultContrastColor = "#B0B0B0"
-def getContrastColor (color):
- return ContrastColor.get(color,DefaultContrastColor);
+
+def getContrastColor(color):
+ return ContrastColor.get(color, DefaultContrastColor)
# dict of possible user settings for each attribute
StyleAttributeOptions = dict(
- symbol = [ DefaultValue,NoneValue,"cross","plus","dot","circle","square","diamond" ],
- symbol_color = [ DefaultValue ] + ColorList,
- label = [ DefaultValue,NoneValue,"%N","%N %BJy","%N %BJy r=%R'" ],
- label_color = [ DefaultValue ] + ColorList,
- label_size = [ DefaultValue,6,8,10,12,14 ],
-);
+ symbol=[DefaultValue, NoneValue, "cross", "plus", "dot", "circle", "square", "diamond"],
+ symbol_color=[DefaultValue] + ColorList,
+ label=[DefaultValue, NoneValue, "%N", "%N %BJy", "%N %BJy r=%R'"],
+ label_color=[DefaultValue] + ColorList,
+ label_size=[DefaultValue, 6, 8, 10, 12, 14],
+)
# constants for the show_list and show_plot attributes
-ShowNot = 0;
-ShowDefault = 1;
-ShowAlways = 2;
+ShowNot = 0
+ShowDefault = 1
+ShowAlways = 2
+
+DefaultPlotAttrs = dict(symbol=None, symbol_color=DefaultColor, symbol_size=5, symbol_linewidth=0,
+ label=None, label_color=DefaultColor, label_size=10,
+ show_list=ShowDefault, show_plot=ShowDefault, apply=0)
-DefaultPlotAttrs = dict(symbol=None,symbol_color=DefaultColor,symbol_size=5,symbol_linewidth=0,
- label=None,label_color=DefaultColor,label_size=10,
- show_list=ShowDefault,show_plot=ShowDefault,apply=0);
+class PlotStyle(ModelClasses.ModelItem):
+ optional_attrs = DefaultPlotAttrs
-class PlotStyle (ModelClasses.ModelItem):
- optional_attrs = DefaultPlotAttrs;
+ def copy(self):
+ return PlotStyle(
+ **dict([(attr, getattr(self, attr, default)) for attr, default in DefaultPlotAttrs.iteritems()]))
- def copy (self):
- return PlotStyle(**dict([(attr,getattr(self,attr,default)) for attr,default in DefaultPlotAttrs.iteritems()]))
+ def update(self, other):
+ for attr in DefaultPlotAttrs.iterkeys():
+ val = getattr(other, attr, None)
+ if val is not None and val != DefaultValue:
+ setattr(self, attr, val)
- def update (self,other):
- for attr in DefaultPlotAttrs.iterkeys():
- val = getattr(other,attr,None);
- if val is not None and val != DefaultValue:
- setattr(self,attr,val);
-PlotStyle.registerClass();
+PlotStyle.registerClass()
# Default plot style. This must define everything! (I.e. no DefaultValue elements allowed.)
-BaselinePlotStyle = PlotStyle(symbol="plus",symbol_color="yellow",symbol_size=2,symbol_linewidth=0,
- label=NoneValue,label_color="blue",label_size=6,
- show_list=ShowAlways,show_plot=ShowAlways,apply=1000);
+BaselinePlotStyle = PlotStyle(symbol="plus", symbol_color="yellow", symbol_size=2, symbol_linewidth=0,
+ label=NoneValue, label_color="blue", label_size=6,
+ show_list=ShowAlways, show_plot=ShowAlways, apply=1000)
-SelectionPlotStyle = PlotStyle(symbol=DefaultValue,symbol_color="cyan",symbol_size=DefaultValue,symbol_linewidth=DefaultValue,
- label="%N",label_color="green",label_size=DefaultValue,
- show_list=ShowAlways,show_plot=ShowAlways,apply=-1);
+SelectionPlotStyle = PlotStyle(symbol=DefaultValue, symbol_color="cyan", symbol_size=DefaultValue,
+ symbol_linewidth=DefaultValue,
+ label="%N", label_color="green", label_size=DefaultValue,
+ show_list=ShowAlways, show_plot=ShowAlways, apply=-1)
-HighlightPlotStyle = PlotStyle(symbol=DefaultValue,symbol_color="red",symbol_size=DefaultValue,symbol_linewidth=DefaultValue,
- label="%N %BJy",label_color="red",label_size=12,
- show_list=ShowAlways,show_plot=ShowAlways,apply=-2);
+HighlightPlotStyle = PlotStyle(symbol=DefaultValue, symbol_color="red", symbol_size=DefaultValue,
+ symbol_linewidth=DefaultValue,
+ label="%N %BJy", label_color="red", label_size=12,
+ show_list=ShowAlways, show_plot=ShowAlways, apply=-2)
-DefaultPlotStyle = PlotStyle(symbol=DefaultValue,symbol_color=DefaultValue,symbol_size=DefaultValue,symbol_linewidth=DefaultValue,
- label=DefaultValue,label_color=DefaultValue,label_size=DefaultValue,
- show_list=ShowDefault,show_plot=ShowDefault,apply=1000);
+DefaultPlotStyle = PlotStyle(symbol=DefaultValue, symbol_color=DefaultValue, symbol_size=DefaultValue,
+ symbol_linewidth=DefaultValue,
+ label=DefaultValue, label_color=DefaultValue, label_size=DefaultValue,
+ show_list=ShowDefault, show_plot=ShowDefault, apply=1000)
# cache of precompiled labels
-_compiled_labels = {};
+_compiled_labels = {}
# label replacements
-_label_keys = { "%N": lambda src:src.name,
- "%B": lambda src:"%.2g"%src.brightness(),
- "%R": lambda src:(hasattr(src,'r') and "%.2g"%(src.r/math.pi*180*60)) or "",
- "%T": lambda src:src.typecode,
- "%I": lambda src:"%.2g"%getattr(src.flux,'I',0),
- "%Q": lambda src:"%.2g"%getattr(src.flux,'Q',0),
- "%U": lambda src:"%.2g"%getattr(src.flux,'U',0),
- "%V": lambda src:"%.2g"%getattr(src.flux,'V',0),
-};
-
-def makeSourceLabel (label,src):
- if label == NoneValue or label is None:
- return "";
- global _label_keys;
- lbl = label;
- for key,func in _label_keys.iteritems():
- if lbl.find(key) >= 0:
- lbl = lbl.replace(key,func(src));
- return lbl;
+_label_keys = {"%N": lambda src: src.name,
+ "%B": lambda src: "%.2g" % src.brightness(),
+ "%R": lambda src: (hasattr(src, 'r') and "%.2g" % (src.r / math.pi * 180 * 60)) or "",
+ "%T": lambda src: src.typecode,
+ "%I": lambda src: "%.2g" % getattr(src.flux, 'I', 0),
+ "%Q": lambda src: "%.2g" % getattr(src.flux, 'Q', 0),
+ "%U": lambda src: "%.2g" % getattr(src.flux, 'U', 0),
+ "%V": lambda src: "%.2g" % getattr(src.flux, 'V', 0),
+ }
+
+
+def makeSourceLabel(label, src):
+ if label == NoneValue or label is None:
+ return ""
+ global _label_keys
+ lbl = label
+ for key, func in _label_keys.iteritems():
+ if lbl.find(key) >= 0:
+ lbl = lbl.replace(key, func(src))
+ return lbl
diff --git a/Tigger/Models/SkyModel.py b/Tigger/Models/SkyModel.py
index 48671d9..6dd94dd 100644
--- a/Tigger/Models/SkyModel.py
+++ b/Tigger/Models/SkyModel.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,415 +24,434 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-from ModelClasses import ModelItem
+import re
+
import PlotStyles
+from ModelClasses import ModelItem
+from Tigger.Coordinates import angular_dist_pos_angle, DEG
-import re
-from Tigger.Coordinates import angular_dist_pos_angle,DEG
-
-class ModelTag (ModelItem):
- mandatory_attrs = [ "name" ];
- optional_attrs = dict([ (attr,None) for attr in PlotStyles.StyleAttributes ]);
-
-ModelTag.registerClass();
-
-class ModelTagSet (ModelItem):
- def __init__ (self,*tags,**kws):
- ModelItem.__init__(self,**kws);
- self.tags = dict([ (tag.name,tag) for tag in tags ]);
-
- def add (self,tag):
- """Adds a ModelTag object to the tag set""";
- self.tags[tag.name] = tag;
-
- def get (self,tagname):
- """Returns ModelTag object associated with tag name, inserting a new one if not found""";
- return self.tags.setdefault(tagname,ModelTag(tagname));
-
- def getAll (self):
- all = self.tags.values();
- all.sort(lambda a,b:cmp(a.name,b.name));
- return all;
-
- def addNames (self,names):
- """Ensures that ModelTag objects are initialized for all tagnames in names""";
- for name in names:
- self.tags.setdefault(name,ModelTag(name));
-
- def renderMarkup (self,tag="A",attrname=None):
- """Makes a markup string corresponding to the model item.
- 'tags' is the HTML tag to use.
- """;
- # opening tag
- markup = "<%s mdltype=ModelTagList "%tag;
- if attrname is not None:
- markup += "mdlattr=%s "%attrname;
- markup +=">";
- # write mandatory attributes
- for name,tt in self.tags.iteritems():
- markup += self.renderAttrMarkup(name,tt,tag="TR",mandatory=True);
- # closing tag
-    markup += "</%s>"%tag;
- return markup;
-ModelTagSet.registerClass();
-
-class Source (ModelItem):
- """Source represents a model source.
- Each source has mandatory name (class str), pos (class Position) and flux (class Flux) model attributes.
- There are optional spectrum (class Spectrum) and shape (class Shape) model attributes.
-
- Standard Python attributes of a Source object are:
- selected: if the source is selected (e.g. in a selection widget)
- typecode: a type code. This is "pnt" if no shape is set (i.e.for a delta-function), otherwise it's the shape's typecode.
- """;
- mandatory_attrs = [ "name","pos","flux" ];
- optional_attrs = dict(spectrum=None,shape=None);
- allow_extra_attrs = True;
-
- def __init__ (self,*args,**kw):
- ModelItem.__init__(self,*args,**kw);
- self.typecode = (self.shape and self.shape.typecode) or "pnt";
- self.selected = False;
-
- def select (self,sel=True):
- self.selected = sel;
-
- def brightness (self):
- iapp = getattr(self,'Iapp',None);
- if iapp is not None:
- return iapp;
- else:
- return getattr(self.flux,'I',0.);
-
- def get_attr (self,attr,default=None):
- return getattr(self,attr,default);
-
- def getTagNames (self):
- return [ attr for attr,val in self.getExtraAttributes() if attr[0] != "_" ];
-
- def getTags (self):
- return [ (attr,val) for attr,val in self.getExtraAttributes() if attr[0] != "_" ];
-
- getTag = get_attr;
- setTag = ModelItem.setAttribute;
-
- class Grouping (object):
- # show_plot settings
- NoPlot = 0;
- Default = 1;
- Plot = 2;
- def __init__ (self,name,func,style=PlotStyles.DefaultPlotStyle,sources=None):
- self.name = name;
- self.style = style;
- self.func = func;
- self.total = 0;
- if sources:
- self.computeTotal(sources);
- def computeTotal (self,sources):
- self.total = len(filter(self.func,sources));
- return self.total;
-
-Source.registerClass();
-
-class SkyModel (ModelItem):
- optional_attrs = dict(name=None,plotstyles={},pbexp=None,ra0=None,dec0=None,freq0=None);
- allow_extra_attrs = True;
-
- def __init__ (self,*sources,**kws):
- ModelItem.__init__(self,**kws);
- # "current" source (grouping "current" below is defined as that one source)
- self._current_source = None;
- self._filename = None;
- # list of loaded images associated with this model
- self._images = [];
- # setup source list
- self.setSources(sources);
-
- def copy (self):
- return SkyModel(*self.sources,**dict(self.getAttributes()));
-
- def images (self):
- """Returns list of images associated with this model""";
- return self._images;
-
- def setFilename (self,filename):
- self._filename = filename;
-
- def filename (self):
- return self._filename;
-
- def setCurrentSource (self,src,origin=None):
- """Changes the current source. If it has indeed changed, emits a currentSourceChanged signal. Arguments passed with the signal:
- src: the new current source.
- src0: the previously current source.
- origin: originator of changes.
- """;
- if self._current_source is not src:
- src0 = self._current_source;
- self._current_source = src;
- if self.signalsEnabled():
- self.emit("changeCurrentSource",src,src0,origin);
-
- def currentSource (self):
- return self._current_source;
-
- # Bitflags for the 'what' argument of the updated() signal below.
- # These indicate what exactly has been updated:
- UpdateSourceList = 1; # source list changed
- UpdateSourceContent = 2; # source attributes have changed
- UpdateTags = 4; # tags have been changed
- UpdateGroupVis = 8; # visibility of a grouping (group.style.show_list attribute) has changed
- UpdateGroupStyle = 16; # plot style of a grouping has changed
- UpdateSelectionOnly = 32; # (in combination with UpdateSourceContent): update only affects currently selected sources
- UpdateAll = UpdateSourceList +UpdateSourceContent+UpdateTags+UpdateGroupVis+UpdateGroupStyle ;
-
- def emitUpdate (self,what=UpdateSourceContent,origin=None):
- """emits an updated() signal, indicating that the model has changed. Arguments passed through with the signal:
- what: what is updated. A combination of flags above.
- origin: originator of changes.
+class ModelTag(ModelItem):
+ mandatory_attrs = ["name"]
+ optional_attrs = dict([(attr, None) for attr in PlotStyles.StyleAttributes])
+
+
+ModelTag.registerClass()
+
+
+class ModelTagSet(ModelItem):
+ def __init__(self, *tags, **kws):
+ ModelItem.__init__(self, **kws)
+ self.tags = dict([(tag.name, tag) for tag in tags])
+
+ def add(self, tag):
+ """Adds a ModelTag object to the tag set"""
+ self.tags[tag.name] = tag
+
+ def get(self, tagname):
+ """Returns ModelTag object associated with tag name, inserting a new one if not found"""
+ return self.tags.setdefault(tagname, ModelTag(tagname))
+
+ def getAll(self):
+ all = self.tags.values()
+ all.sort(lambda a, b: cmp(a.name, b.name))
+ return all
+
+ def addNames(self, names):
+ """Ensures that ModelTag objects are initialized for all tagnames in names"""
+ for name in names:
+ self.tags.setdefault(name, ModelTag(name))
+
+ def renderMarkup(self, tag="A", attrname=None):
+ """Makes a markup string corresponding to the model item.
+ 'tags' is the HTML tag to use.
+ """
+ # opening tag
+ markup = "<%s mdltype=ModelTagList " % tag
+ if attrname is not None:
+ markup += "mdlattr=%s " % attrname
+ markup += ">"
+ # write mandatory attributes
+ for name, tt in self.tags.iteritems():
+ markup += self.renderAttrMarkup(name, tt, tag="TR", mandatory=True)
+ # closing tag
+        markup += "</%s>" % tag
+ return markup
+
+
+ModelTagSet.registerClass()
+
+
+class Source(ModelItem):
+ """Source represents a model source.
+ Each source has mandatory name (class str), pos (class Position) and flux (class Flux) model attributes.
+ There are optional spectrum (class Spectrum) and shape (class Shape) model attributes.
+
+ Standard Python attributes of a Source object are:
+ selected: if the source is selected (e.g. in a selection widget)
+    typecode: a type code. This is "pnt" if no shape is set (i.e. for a delta-function), otherwise it's the shape's typecode.
"""
- if self.signalsEnabled():
- self.emit("updated",what,origin);
-
- def emitSelection (self,origin=None):
- """emits an selected() signal, indicating that the selection has changed. Arguments passed through with the signal:
- num: number of selected sources.
- origin: originator of changes.
- """;
- self.selgroup.computeTotal(self.sources);
- if self.signalsEnabled():
- self.emit("selected",self.selgroup.total,origin);
-
- def emitChangeGroupingVisibility (self,group,origin=None):
- if self.signalsEnabled():
- self.emit("changeGroupingVisibility",group,origin);
- self.emitUpdate(SkyModel.UpdateGroupVis,origin);
-
- def emitChangeGroupingStyle (self,group,origin=None):
- if self.signalsEnabled():
- self.emit("changeGroupingStyle",group,origin);
- self.emitUpdate(SkyModel.UpdateGroupStyle,origin);
-
- def findSource (self,name):
- return self._src_by_name[name];
-
- def setSources (self,sources,origin=None,recompute_r=False):
- # if recompute_r is True, recomputes the 'r' attribute for all sources
- self.sources = list(sources);
- self._src_by_name = dict([(src.name,src) for src in self.sources]);
- if recompute_r:
- self.recomputeRadialDistance();
- self.scanTags();
- self.initGroupings();
-
- def addSources (self,sources,recompute_r=True):
- # if recompute_r is True, recomputes the 'r' attribute for new sources
- if recompute_r:
- self.recomputeRadialDistance(sources);
- self.setSources(list(self.sources)+list(sources));
-
- def __len__ (self):
- return len(self.sources);
-
- def __getitem__ (self,key):
- if isinstance(key,(int,slice)):
- return self.sources[key];
- elif isinstance(key,str):
- return self.findSource(key);
- else:
- raise TypeError("cannot index SkyModel with key of type %s"%str(type(key)));
-
- def __setitem__ (self,key,value):
- raise TypeError("cannot assign to items of SkyModel, use the setSources() method instead");
-
- def __iter__ (self):
- return iter(self.sources);
-
- def recomputeRadialDistance (self,sources=None):
- # refreshes the radial distance for a group of sources, or all sources in the model
- if (self.ra0 and self.dec0) is not None:
- for src in (sources or self.sources):
- r,pa = angular_dist_pos_angle(src.pos.ra,src.pos.dec,self.ra0,self.dec0);
- src.setAttribute('r',r);
-
- def scanTags (self,sources=None):
- """Populates self.tagnames with a list of tags present in sources""";
- sources = sources or self.sources;
- tagnames = set();
- for src in sources:
- tagnames.update(src.getTagNames());
- self.tagnames = list(tagnames);
- self.tagnames.sort();
-
- def initGroupings (self):
- # init default and "selected" groupings
- # For the default style, make sure all style fields are initialied to proper values, so that some style setting is always guaranteed.
- # Do this by sarting with the Baseline style, and applying the specified default style to it as an update.
- if 'default' in self.plotstyles:
- defstyle = PlotStyles.BaselinePlotStyle.copy();
- defstyle.update(self.plotstyles['default']);
- defstyle.apply = 1000; # apply at lowest priority
- else:
- defstyle = self.plotstyles['default'] = PlotStyles.BaselinePlotStyle;
- self.defgroup = Source.Grouping("all sources",func=lambda src:True,sources=self.sources,style=defstyle);
- self.curgroup = Source.Grouping("current source",func=lambda src:self.currentSource() is src,sources=self.sources,
- style=self.plotstyles.setdefault('current',PlotStyles.HighlightPlotStyle));
- self.selgroup = Source.Grouping("selected sources",func=lambda src:getattr(src,'selected',False),sources=self.sources,
- style=self.plotstyles.setdefault('selected',PlotStyles.SelectionPlotStyle));
- # and make ordered list of groupings
- self.groupings = [ self.defgroup,self.curgroup,self.selgroup ];
- # make groupings from available source types
- self._typegroups = {};
- typecodes = list(set([src.typecode for src in self.sources]));
- typecodes.sort();
- if len(typecodes) > 1:
- for code in typecodes:
- self._typegroups[code] = group = Source.Grouping("type: %s"%code,lambda src,code=code:src.typecode==code,sources=self.sources,
- style=self.plotstyles.setdefault('type:%s'%code,PlotStyles.DefaultPlotStyle));
- self.groupings.append(group);
- # make groupings from source tags
- self._taggroups = {};
- for tag in self.tagnames:
- self._taggroups[tag] = group = Source.Grouping("tag: %s"%tag,
- lambda src,tag=tag:getattr(src,tag,None) not in [None,False],
- sources=self.sources,
- style=self.plotstyles.setdefault('tag:%s'%tag,PlotStyles.DefaultPlotStyle));
- self.groupings.append(group);
-
- def _remakeGroupList (self):
- self.groupings = [ self.defgroup,self.curgroup,self.selgroup ];
- typenames = self._typegroups.keys();
- typenames.sort();
- self.groupings += [ self._typegroups[name] for name in typenames ];
- self.groupings += [ self._taggroups[name] for name in self.tagnames ];
-
- def getTagGrouping (self,tag):
- return self._taggroups[tag];
-
- def getTypeGrouping (self,typename):
- return self._typegroups[typename];
-
- def getSourcePlotStyle (self,src):
- """Returns PlotStyle object for given source, using the styles in the model grouping.
- Returns tuple of plotstyle,label, or None,None if no source is to be plotted.
- """;
- # get list of styles from groupings to which this source belongs
- styles = [ group.style for group in self.groupings if group.func(src) ];
- # sort in order of priority (high apply to low apply)
- styles.sort(lambda a,b:cmp(b.apply,a.apply));
- # "show_plot" attribute: if at least one group is showing explicitly, show
- # else if at least one group is hiding explicitly, hide
- # else use default setting
- show = [ st.show_plot for st in styles ];
- if show and max(show) == PlotStyles.ShowAlways:
- show = True;
- elif show and min(show) == PlotStyles.ShowNot:
- show = False;
- else:
- show = bool(style0.show_plot);
- if not show:
- return None,None;
- # sort styles
- # Override attributes in style object with non-default attributes found in each matching grouping
- # Go in reverse, so 'current' overrides 'selected' overrides types overrides tags
- style = None;
- for st in styles:
- if st.apply:
- # make copy-on-write, so we don't overwrite the original style object
- if style is None:
- style = st.copy();
+ mandatory_attrs = ["name", "pos", "flux"]
+ optional_attrs = dict(spectrum=None, shape=None)
+ allow_extra_attrs = True
+
+ def __init__(self, *args, **kw):
+ ModelItem.__init__(self, *args, **kw)
+ self.typecode = (self.shape and self.shape.typecode) or "pnt"
+ self.selected = False
+
+ def select(self, sel=True):
+ self.selected = sel
+
+ def brightness(self):
+ iapp = getattr(self, 'Iapp', None)
+ if iapp is not None:
+ return iapp
else:
- style.update(st);
- return style,PlotStyles.makeSourceLabel(style.label,src);
-
- def addTag (self,tag):
- if tag in self.tagnames:
- return False;
- # tags beginning with "_" are internal, not added to tagname list
- if tag[0] == "_":
- return False;
- # add to list
- self.tagnames.append(tag);
- self.tagnames.sort();
- # add to groupings
- self._taggroups[tag] = Source.Grouping("tag: %s"%tag,
- lambda src,tag=tag:getattr(src,tag,None) not in [None,False],
- sources=self.sources,
- style=self.plotstyles.setdefault('tag:%s'%tag,PlotStyles.DefaultPlotStyle));
- # reform grouping list
- self._remakeGroupList();
- return True;
-
- def setFieldCenter (self,ra0,dec0):
- self.ra0,self.dec0 = ra0,dec0;
-
- def setPrimaryBeam (self,pbexp):
- self.pbexp = pbexp;
-
- def primaryBeam (self):
- return getattr(self,'pbexp',None);
-
- def setRefFreq (self,freq0):
- self.freq0 = freq0;
-
- def refFreq (self):
- return self.freq0;
-
- def hasFieldCenter (self):
- return self.ra0 is not None and self.dec0 is not None;
-
- def fieldCenter (self):
- """Returns center of field. If this is not explicitly specified in the model, uses the average position of all sources.""";
- if self.ra0 is None:
- self.ra0 = reduce(lambda x,y:x+y,[ src.pos.ra for src in self.sources ])/len(self.sources) if self.sources else 0;
- if self.dec0 is None:
- self.dec0 = reduce(lambda x,y:x+y,[ src.pos.dec for src in self.sources ])/len(self.sources) if self.sources else 0;
- return self.ra0,self.dec0;
-
- def save (self,filename,format=None, verbose=True):
- """Convenience function, saves model to file. Format may be specified explicitly, or determined from filename.""";
- import Formats
- Formats.save(self,filename,format=format, verbose=verbose);
-
- _re_bynumber = re.compile("^([!-])?(\\d+)?:(\\d+)?$");
-
- def getSourcesNear (self,ra,dec,tolerance=DEG/60):
-    return [ src for src in self.sources if angular_dist_pos_angle(src.pos.ra,src.pos.dec,ra,dec)[0] < tolerance ];
+        if len(typecodes) > 1:
+ for code in typecodes:
+ self._typegroups[code] = group = Source.Grouping("type: %s" % code,
+ lambda src, code=code: src.typecode == code,
+ sources=self.sources,
+ style=self.plotstyles.setdefault('type:%s' % code,
+ PlotStyles.DefaultPlotStyle))
+ self.groupings.append(group)
+ # make groupings from source tags
+ self._taggroups = {}
+ for tag in self.tagnames:
+ self._taggroups[tag] = group = Source.Grouping("tag: %s" % tag,
+ lambda src, tag=tag: getattr(src, tag, None) not in [None,
+ False],
+ sources=self.sources,
+ style=self.plotstyles.setdefault('tag:%s' % tag,
+ PlotStyles.DefaultPlotStyle))
+ self.groupings.append(group)
+
+ def _remakeGroupList(self):
+ self.groupings = [self.defgroup, self.curgroup, self.selgroup]
+ typenames = self._typegroups.keys()
+ typenames.sort()
+ self.groupings += [self._typegroups[name] for name in typenames]
+ self.groupings += [self._taggroups[name] for name in self.tagnames]
+
+ def getTagGrouping(self, tag):
+ return self._taggroups[tag]
+
+ def getTypeGrouping(self, typename):
+ return self._typegroups[typename]
+
+ def getSourcePlotStyle(self, src):
+ """Returns PlotStyle object for given source, using the styles in the model grouping.
+ Returns tuple of plotstyle,label, or None,None if no source is to be plotted.
+ """
+ # get list of styles from groupings to which this source belongs
+ styles = [group.style for group in self.groupings if group.func(src)]
+ # sort in order of priority (high apply to low apply)
+ styles.sort(lambda a, b: cmp(b.apply, a.apply))
+ # "show_plot" attribute: if at least one group is showing explicitly, show
+ # else if at least one group is hiding explicitly, hide
+ # else use default setting
+ show = [st.show_plot for st in styles]
+ if show and max(show) == PlotStyles.ShowAlways:
+ show = True
+ elif show and min(show) == PlotStyles.ShowNot:
+ show = False
+ else:
+ show = bool(style0.show_plot)
+ if not show:
+ return None, None
+ # sort styles
+ # Override attributes in style object with non-default attributes found in each matching grouping
+ # Go in reverse, so 'current' overrides 'selected' overrides types overrides tags
+ style = None
+ for st in styles:
+ if st.apply:
+ # make copy-on-write, so we don't overwrite the original style object
+ if style is None:
+ style = st.copy()
+ else:
+ style.update(st)
+ return style, PlotStyles.makeSourceLabel(style.label, src)
+
+ def addTag(self, tag):
+ if tag in self.tagnames:
+ return False
+ # tags beginning with "_" are internal, not added to tagname list
+ if tag[0] == "_":
+ return False
+ # add to list
+ self.tagnames.append(tag)
+ self.tagnames.sort()
+ # add to groupings
+ self._taggroups[tag] = Source.Grouping("tag: %s" % tag,
+ lambda src, tag=tag: getattr(src, tag, None) not in [None, False],
+ sources=self.sources,
+ style=self.plotstyles.setdefault('tag:%s' % tag,
+ PlotStyles.DefaultPlotStyle))
+ # reform grouping list
+ self._remakeGroupList()
+ return True
+
+ def setFieldCenter(self, ra0, dec0):
+ self.ra0, self.dec0 = ra0, dec0
+
+ def setPrimaryBeam(self, pbexp):
+ self.pbexp = pbexp
+
+ def primaryBeam(self):
+ return getattr(self, 'pbexp', None)
+
+ def setRefFreq(self, freq0):
+ self.freq0 = freq0
+
+ def refFreq(self):
+ return self.freq0
+
+ def hasFieldCenter(self):
+ return self.ra0 is not None and self.dec0 is not None
+
+ def fieldCenter(self):
+ """Returns center of field. If this is not explicitly specified in the model, uses the average position of all sources."""
+ if self.ra0 is None:
+ self.ra0 = reduce(lambda x, y: x + y, [src.pos.ra for src in self.sources]) / len(
+ self.sources) if self.sources else 0
+ if self.dec0 is None:
+ self.dec0 = reduce(lambda x, y: x + y, [src.pos.dec for src in self.sources]) / len(
+ self.sources) if self.sources else 0
+ return self.ra0, self.dec0
+
+ def save(self, filename, format=None, verbose=True):
+ """Convenience function, saves model to file. Format may be specified explicitly, or determined from filename."""
+ import Formats
+ Formats.save(self, filename, format=format, verbose=verbose)
+
+ _re_bynumber = re.compile("^([!-])?(\\d+)?:(\\d+)?$")
+
+ def getSourcesNear(self, ra, dec, tolerance=DEG / 60):
+ return [src for src in self.sources if angular_dist_pos_angle(src.pos.ra, src.pos.dec, ra, dec)[0] < tolerance]
+
+ def getSourceSubset(self, selection=None):
+ """Gets list of sources matching the given selection string (if None, then all sources are returned.)"""
+ if not selection or selection.lower() == "all":
+ return self.sources
+ # sort by brightness
+ srclist0 = sorted(self.sources, lambda a, b: cmp(b.brightness(), a.brightness()))
+ all = set([src.name for src in srclist0])
+ srcs = set()
+ for ispec, spec in enumerate(re.split("\s+|,", selection)):
+ spec = spec.strip()
+ if spec:
+                # if first spec is a negation, then implicitly select all sources first
+ if not ispec and spec[0] in "!-":
+ srcs = all
+ if spec.lower() == "all":
+ srcs = all
+ elif self._re_bynumber.match(spec):
+ negate, start, end = self._re_bynumber.match(spec).groups()
+ sl = slice(int(start) if start else None, int(end) if end else None)
+ if negate:
+ srcs.difference_update([src.name for src in srclist0[sl]])
+ else:
+ srcs.update([src.name for src in srclist0[sl]])
+ elif spec.startswith("-=") or spec.startswith("!="):
+ srcs.difference_update([src.name for src in srclist0 if getattr(src, spec[2:], None)])
+ elif spec.startswith("="):
+ srcs.update([src.name for src in srclist0 if getattr(src, spec[1:], None)])
+ elif spec.startswith("-") or spec.startswith("!"):
+ srcs.discard(spec[1:])
+ else:
+ srcs.add(spec)
+ # make list
+ return [src for src in srclist0 if src.name in srcs]
+
+
+SkyModel.registerClass()
diff --git a/Tigger/Models/__init__.py b/Tigger/Models/__init__.py
index db87c57..363ecc6 100644
--- a/Tigger/Models/__init__.py
+++ b/Tigger/Models/__init__.py
@@ -1,5 +1,5 @@
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -22,4 +22,3 @@
# or write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-
diff --git a/Tigger/SiameseInterface.py b/Tigger/SiameseInterface.py
index ba776f1..4f3617e 100644
--- a/Tigger/SiameseInterface.py
+++ b/Tigger/SiameseInterface.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -24,252 +24,255 @@
# or write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+import math
import sys
-from Timba.TDL import TDLCompileOptions, TDLRuntimeOptions, TDLRuntimeOptions, TDLOption, TDLFileSelect, TDLMenu
-from Timba.utils import curry
-import traceback
import Meow
-import Meow.OptionTools
import Meow.Context
+import Meow.OptionTools
import Meow.ParmGroup
-import math
import os.path
-
from Meow.MeqMaker import SourceSubsetSelector
+from Timba.TDL import TDLCompileOptions, TDLRuntimeOptions, TDLOption, TDLFileSelect, TDLMenu
# find out where Tigger lives -- either it's in the path, or we add it
try:
- import Tigger
+ import Tigger
except:
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
- import Tigger
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ import Tigger
from Tigger.Models import ModelClasses
from Tigger.Models.Formats import ModelHTML
# this dict determines how source attributes are grouped into "parameter subgroups"
-_Subgroups = dict(I="I",Q="Q",U="U",V="V",
- ra="pos",dec="pos",RM="RM",spi="spi",
- sx="shape",sy="shape",phi="shape");
-_SubgroupOrder = "I","Q","U","V","pos","spi","RM","shape";
+_Subgroups = dict(I="I", Q="Q", U="U", V="V",
+ ra="pos", dec="pos", RM="RM", spi="spi",
+ sx="shape", sy="shape", phi="shape")
+_SubgroupOrder = "I", "Q", "U", "V", "pos", "spi", "RM", "shape"
-class TiggerSkyModel (object):
- """Interface to a Tigger-format sky model."""
- def __init__ (self,filename=None,include_options=False,tdloption_namespace='tiggerlsm'):
- """Initializes a TiggerSkyModel object.
- A filename and a format may be specified, although the actual file will
- only be loaded on demand.
- If include_options=True, immediately instantiates the options. If False, it is up to
- the caller to include the options in his menus.
- """;
- self.tdloption_namespace = tdloption_namespace;
- self._compile_opts = [];
- self._runtime_opts = [];
- self.filename = filename;
- self.lsm = None;
- # immediately include options, if needed
- if include_options:
- TDLCompileOptions(*self.compile_options());
- TDLRuntimeOptions(*self.runtime_options());
- def compile_options (self):
- """Returns list of compile-time options""";
- if not self._compile_opts:
- self._compile_opts = [
- TDLRuntimeOptions("filename","Tigger LSM file",
- TDLFileSelect("Tigger models (*."+ModelHTML.DefaultExtension+");;All files (*)",default=self.filename,exist=True),
- namespace=self),
- TDLOption('lsm_subset',"Source subset",["all"],more=str,namespace=self,
- doc=SourceSubsetSelector.docstring),
- TDLOption('null_subset',"Use nulls for subset",[None],more=str,namespace=self,doc=
- """If you wish, any subset of sources may be "nulled" by inserting a null
- brightness for them. This is used in some advanced calibration scenarios; if
- you're not sure about this option, just leave it set to "None".
-
"""+SourceSubsetSelector.docstring),
- TDLMenu("Make solvable source parameters",
- TDLOption('lsm_solvable_tag',"Solvable source tag",[None,"solvable"],more=str,namespace=self,
- doc="""If you specify a tagname, only sources bearing that tag will be made solvable. Use 'None' to make all sources solvable."""),
- TDLOption('lsm_solve_group_tag',"Group independent solutions by tag",[None,"cluster"],more=str,namespace=self,
- doc="""If you specify a tagname, sources will be grouped by the value of the tag,
- and each group will be treated as an independent solution."""),
- TDLOption("solve_I","I",False,namespace=self),
- TDLOption("solve_Q","Q",False,namespace=self),
- TDLOption("solve_U","U",False,namespace=self),
- TDLOption("solve_V","V",False,namespace=self),
- TDLOption("solve_spi","spectral index",False,namespace=self),
- TDLOption("solve_pos","position",False,namespace=self),
- TDLOption("solve_RM","rotation measure",False,namespace=self),
- TDLOption("solve_shape","shape (for extended sources)",False,namespace=self),
- toggle='solvable_sources',namespace=self,
- )
- ];
- return self._compile_opts;
+class TiggerSkyModel(object):
+ """Interface to a Tigger-format sky model."""
+
+ def __init__(self, filename=None, include_options=False, tdloption_namespace='tiggerlsm'):
+ """Initializes a TiggerSkyModel object.
+ A filename and a format may be specified, although the actual file will
+ only be loaded on demand.
+ If include_options=True, immediately instantiates the options. If False, it is up to
+ the caller to include the options in his menus.
+ """
+ self.tdloption_namespace = tdloption_namespace
+ self._compile_opts = []
+ self._runtime_opts = []
+ self.filename = filename
+ self.lsm = None
+ # immediately include options, if needed
+ if include_options:
+ TDLCompileOptions(*self.compile_options())
+ TDLRuntimeOptions(*self.runtime_options())
- def runtime_options (self):
- """Makes and returns list of compile-time options""";
- # no runtime options, for now
- return self._runtime_opts;
-
- # helper function for use with SourceSubsetSelector below
- @staticmethod
- def _getTagValue (src,tag):
- """Helper function: looks for the given tag in the source, or in its sub-objects""";
- for obj in src,src.pos,src.flux,getattr(src,'shape',None),getattr(src,'spectrum',None):
- if obj is not None and hasattr(obj,tag):
- return getattr(obj,tag);
- return None;
-
+ def compile_options(self):
+ """Returns list of compile-time options"""
+ if not self._compile_opts:
+ self._compile_opts = [
+ TDLRuntimeOptions("filename", "Tigger LSM file",
+ TDLFileSelect("Tigger models (*." + ModelHTML.DefaultExtension + ");;All files (*)",
+ default=self.filename, exist=True),
+ namespace=self),
+ TDLOption('lsm_subset', "Source subset", ["all"], more=str, namespace=self,
+ doc=SourceSubsetSelector.docstring),
+ TDLOption('null_subset', "Use nulls for subset", [None], more=str, namespace=self, doc=
+ """If you wish, any subset of sources may be "nulled" by inserting a null
+ brightness for them. This is used in some advanced calibration scenarios; if
+ you're not sure about this option, just leave it set to "None".
+ """ + SourceSubsetSelector.docstring),
+ TDLMenu("Make solvable source parameters",
+ TDLOption('lsm_solvable_tag', "Solvable source tag", [None, "solvable"], more=str,
+ namespace=self,
+ doc="""If you specify a tagname, only sources bearing that tag will be made solvable. Use 'None' to make all sources solvable."""),
+ TDLOption('lsm_solve_group_tag', "Group independent solutions by tag", [None, "cluster"],
+ more=str, namespace=self,
+ doc="""If you specify a tagname, sources will be grouped by the value of the tag,
+ and each group will be treated as an independent solution."""),
+ TDLOption("solve_I", "I", False, namespace=self),
+ TDLOption("solve_Q", "Q", False, namespace=self),
+ TDLOption("solve_U", "U", False, namespace=self),
+ TDLOption("solve_V", "V", False, namespace=self),
+ TDLOption("solve_spi", "spectral index", False, namespace=self),
+ TDLOption("solve_pos", "position", False, namespace=self),
+ TDLOption("solve_RM", "rotation measure", False, namespace=self),
+ TDLOption("solve_shape", "shape (for extended sources)", False, namespace=self),
+ toggle='solvable_sources', namespace=self,
+ )
+ ]
+ return self._compile_opts
- def source_list (self,ns,max_sources=None,**kw):
- """Reads LSM and returns a list of Meow objects.
- ns is node scope in which they will be created.
- Keyword arguments may be used to indicate which of the source attributes are to be
- created as Parms, use e.g. I=Meow.Parm(tags="flux") for this.
- The use_parms option may override this.
- """;
- if self.filename is None:
- return [];
- # load the sky model
- if self.lsm is None:
- self.lsm = Tigger.load(self.filename);
+ def runtime_options(self):
+        """Makes and returns list of runtime options"""
+ # no runtime options, for now
+ return self._runtime_opts
- # sort by brightness
- sources = sorted(self.lsm.sources,lambda a,b:cmp(b.brightness(),a.brightness()));
+ # helper function for use with SourceSubsetSelector below
+ @staticmethod
+ def _getTagValue(src, tag):
+ """Helper function: looks for the given tag in the source, or in its sub-objects"""
+ for obj in src, src.pos, src.flux, getattr(src, 'shape', None), getattr(src, 'spectrum', None):
+ if obj is not None and hasattr(obj, tag):
+ return getattr(obj, tag)
+ return None
- # extract subset, if specified
- sources = SourceSubsetSelector.filter_subset(self.lsm_subset,sources,self._getTagValue);
- # get nulls subset
- if self.null_subset:
- nulls = set([src.name for src in SourceSubsetSelector.filter_subset(self.null_subset,sources)]);
- else:
- nulls = set();
- parm = Meow.Parm(tags="source solvable");
- # make copy of kw dict to be used for sources not in solvable set
- kw_nonsolve = dict(kw);
- # and update kw dict to be used for sources in solvable set
- # this will be a dict of lists of solvable subgroups
- parms = [];
- subgroups = {};
- if self.solvable_sources:
- subgroup_order = [];
- for sgname in _SubgroupOrder:
- if getattr(self,'solve_%s'%sgname):
- sg = subgroups[sgname] = [];
- subgroup_order.append(sgname);
+ def source_list(self, ns, max_sources=None, **kw):
+ """Reads LSM and returns a list of Meow objects.
+ ns is node scope in which they will be created.
+ Keyword arguments may be used to indicate which of the source attributes are to be
+ created as Parms, use e.g. I=Meow.Parm(tags="flux") for this.
+ The use_parms option may override this.
+ """
+ if self.filename is None:
+ return []
+ # load the sky model
+ if self.lsm is None:
+ self.lsm = Tigger.load(self.filename)
- # make Meow list
- source_model = []
+ # sort by brightness
+ sources = sorted(self.lsm.sources, lambda a, b: cmp(b.brightness(), a.brightness()))
- for src in sources:
- is_null = src.name in nulls;
- # this will be True if this source has solvable parms
- solvable = self.solvable_sources and not is_null and ( not self.lsm_solvable_tag
- or getattr(src,self.lsm_solvable_tag,False) );
- if solvable:
- # independent groups?
- if self.lsm_solve_group_tag:
- independent_sg = sgname = "%s:%s"%(self.lsm_solve_group_tag,getattr(src,self.lsm_solve_group_tag,"unknown"));
- else:
- independent_sg = "";
- sgname = 'source:%s'%src.name;
- if sgname in subgroups:
- sgsource = subgroups[sgname];
+ # extract subset, if specified
+ sources = SourceSubsetSelector.filter_subset(self.lsm_subset, sources, self._getTagValue)
+ # get nulls subset
+ if self.null_subset:
+ nulls = set([src.name for src in SourceSubsetSelector.filter_subset(self.null_subset, sources)])
else:
- sgsource = subgroups[sgname] = [];
- subgroup_order.append(sgname);
- # make dict of source parametrs: for each parameter we have a value,subgroup pair
- if is_null:
- attrs = dict(ra=src.pos.ra,dec=src.pos.dec,I=0,Q=None,U=None,V=None,RM=None,spi=None,freq0=None);
- else:
- attrs = dict(
- ra= src.pos.ra,
- dec= src.pos.dec,
- I= src.flux.I,
- Q= getattr(src.flux,'Q',None),
- U= getattr(src.flux,'U',None),
- V= getattr(src.flux,'V',None),
- RM= getattr(src.flux,'rm',None),
- freq0= getattr(src.flux,'freq0',None) or (src.spectrum and getattr(src.spectrum,'freq0',None)),
- spi= src.spectrum and getattr(src.spectrum,'spi',None)
- );
- if not is_null and isinstance(src.shape,ModelClasses.Gaussian):
- attrs['lproj'] = src.shape.ex*math.sin(src.shape.pa);
- attrs['mproj'] = src.shape.ex*math.cos(src.shape.pa);
- attrs['ratio'] = src.shape.ey/src.shape.ex;
- # construct parms or constants for source attributes, depending on whether the source is solvable or not
- # If source is solvable and this particular attribute is solvable, replace
- # value in attrs dict with a Meq.Parm.
- if solvable:
- for parmname,value in attrs.items():
- sgname = _Subgroups.get(parmname,None);
- if sgname in subgroups:
- solvable = True;
- parm = attrs[parmname] = ns[src.name](parmname) << Meq.Parm(value or 0,
- tags=["solvable",sgname],solve_group=independent_sg);
- subgroups[sgname].append(parm);
- sgsource.append(parm);
- parms.append(parm);
+ nulls = set()
+ parm = Meow.Parm(tags="source solvable")
+ # make copy of kw dict to be used for sources not in solvable set
+ kw_nonsolve = dict(kw)
+ # and update kw dict to be used for sources in solvable set
+ # this will be a dict of lists of solvable subgroups
+ parms = []
+ subgroups = {}
+ if self.solvable_sources:
+ subgroup_order = []
+ for sgname in _SubgroupOrder:
+ if getattr(self, 'solve_%s' % sgname):
+ sg = subgroups[sgname] = []
+ subgroup_order.append(sgname)
- # construct a direction
- direction = Meow.Direction(ns,src.name,attrs['ra'],attrs['dec'],static=not solvable or not self.solve_pos);
+ # make Meow list
+ source_model = []
- # construct a point source or gaussian or FITS image, depending on source shape class
- if src.shape is None or is_null:
- msrc = Meow.PointSource(ns,name=src.name,
- I=attrs['I'],Q=attrs['Q'],U=attrs['U'],V=attrs['V'],
- direction=direction,
- spi=attrs['spi'],freq0=attrs['freq0'],RM=attrs['RM']);
- elif isinstance(src.shape,ModelClasses.Gaussian):
- msrc = Meow.GaussianSource(ns,name=src.name,
- I=attrs['I'],Q=attrs['Q'],U=attrs['U'],V=attrs['V'],
- direction=direction,
- spi=attrs['spi'],freq0=attrs['freq0'],
- lproj=attrs['lproj'],mproj=attrs['mproj'],ratio=attrs['ratio']);
- if solvable and 'shape' in subgroups:
- subgroups['pos'] += direction.get_solvables();
- elif isinstance(src.shape,ModelClasses.FITSImage):
- msrc = Meow.FITSImageComponent(ns,name=src.name,
- filename=src.shape.filename,
- direction=direction);
- msrc.set_options(fft_pad_factor=(src.shape.pad or 2));
+ for src in sources:
+ is_null = src.name in nulls
+ # this will be True if this source has solvable parms
+ solvable = self.solvable_sources and not is_null and (not self.lsm_solvable_tag
+ or getattr(src, self.lsm_solvable_tag, False))
+ if solvable:
+ # independent groups?
+ if self.lsm_solve_group_tag:
+ independent_sg = sgname = "%s:%s" % (
+ self.lsm_solve_group_tag, getattr(src, self.lsm_solve_group_tag, "unknown"))
+ else:
+ independent_sg = ""
+ sgname = 'source:%s' % src.name
+ if sgname in subgroups:
+ sgsource = subgroups[sgname]
+ else:
+ sgsource = subgroups[sgname] = []
+ subgroup_order.append(sgname)
+            # make dict of source parameters: for each parameter we have a value, subgroup pair
+ if is_null:
+ attrs = dict(ra=src.pos.ra, dec=src.pos.dec, I=0, Q=None, U=None, V=None, RM=None, spi=None,
+ freq0=None)
+ else:
+ attrs = dict(
+ ra=src.pos.ra,
+ dec=src.pos.dec,
+ I=src.flux.I,
+ Q=getattr(src.flux, 'Q', None),
+ U=getattr(src.flux, 'U', None),
+ V=getattr(src.flux, 'V', None),
+ RM=getattr(src.flux, 'rm', None),
+ freq0=getattr(src.flux, 'freq0', None) or (src.spectrum and getattr(src.spectrum, 'freq0', None)),
+ spi=src.spectrum and getattr(src.spectrum, 'spi', None)
+ )
+ if not is_null and isinstance(src.shape, ModelClasses.Gaussian):
+ attrs['lproj'] = src.shape.ex * math.sin(src.shape.pa)
+ attrs['mproj'] = src.shape.ex * math.cos(src.shape.pa)
+ attrs['ratio'] = src.shape.ey / src.shape.ex
+ # construct parms or constants for source attributes, depending on whether the source is solvable or not
+ # If source is solvable and this particular attribute is solvable, replace
+ # value in attrs dict with a Meq.Parm.
+ if solvable:
+ for parmname, value in attrs.items():
+ sgname = _Subgroups.get(parmname, None)
+ if sgname in subgroups:
+ solvable = True
+ parm = attrs[parmname] = ns[src.name](parmname) << Meq.Parm(value or 0,
+ tags=["solvable", sgname],
+ solve_group=independent_sg)
+ subgroups[sgname].append(parm)
+ sgsource.append(parm)
+ parms.append(parm)
- msrc.solvable = solvable;
+ # construct a direction
+ direction = Meow.Direction(ns, src.name, attrs['ra'], attrs['dec'],
+ static=not solvable or not self.solve_pos)
- # copy standard attributes from sub-objects
- for subobj in src.flux,src.shape,src.spectrum:
- if subobj:
- for attr,val in src.flux.getAttributes():
- msrc.set_attr(attr,val);
- # copy all extra attrs from source object
- for attr,val in src.getExtraAttributes():
- msrc.set_attr(attr,val);
+ # construct a point source or gaussian or FITS image, depending on source shape class
+ if src.shape is None or is_null:
+ msrc = Meow.PointSource(ns, name=src.name,
+ I=attrs['I'], Q=attrs['Q'], U=attrs['U'], V=attrs['V'],
+ direction=direction,
+ spi=attrs['spi'], freq0=attrs['freq0'], RM=attrs['RM'])
+ elif isinstance(src.shape, ModelClasses.Gaussian):
+ msrc = Meow.GaussianSource(ns, name=src.name,
+ I=attrs['I'], Q=attrs['Q'], U=attrs['U'], V=attrs['V'],
+ direction=direction,
+ spi=attrs['spi'], freq0=attrs['freq0'],
+ lproj=attrs['lproj'], mproj=attrs['mproj'], ratio=attrs['ratio'])
+ if solvable and 'shape' in subgroups:
+ subgroups['pos'] += direction.get_solvables()
+ elif isinstance(src.shape, ModelClasses.FITSImage):
+ msrc = Meow.FITSImageComponent(ns, name=src.name,
+ filename=src.shape.filename,
+ direction=direction)
+ msrc.set_options(fft_pad_factor=(src.shape.pad or 2))
- # make sure Iapp exists (init with I if it doesn't)
- if msrc.get_attr('Iapp',None) is None:
- msrc.set_attr('Iapp',src.flux.I);
+ msrc.solvable = solvable
- source_model.append(msrc);
+ # copy standard attributes from sub-objects
+ for subobj in src.flux, src.shape, src.spectrum:
+ if subobj:
+ for attr, val in src.flux.getAttributes():
+ msrc.set_attr(attr, val)
+ # copy all extra attrs from source object
+ for attr, val in src.getExtraAttributes():
+ msrc.set_attr(attr, val)
- # if any solvable parms were made, make a parmgroup and solve job for them
- if parms:
- if os.path.isdir(self.filename):
- table_name = os.path.join(self.filename,"sources.fmep");
- else:
- table_name = os.path.splitext(self.filename)[0]+".fmep";
- # make list of Subgroup objects for every non-empty subgroup
- sgs = [];
- for sgname in subgroup_order:
- sglist = subgroups.get(sgname,None);
- if sglist:
- sgs.append(Meow.ParmGroup.Subgroup(sgname,sglist));
- # make main parm group
- pg_src = Meow.ParmGroup.ParmGroup("source parameters",parms,
- subgroups=sgs,
- table_name=table_name,table_in_ms=False,bookmark=True);
- # now make a solvejobs for the source
- Meow.ParmGroup.SolveJob("cal_source","Solve for source parameters",pg_src);
+ # make sure Iapp exists (init with I if it doesn't)
+ if msrc.get_attr('Iapp', None) is None:
+ msrc.set_attr('Iapp', src.flux.I)
+ source_model.append(msrc)
- return source_model;
+ # if any solvable parms were made, make a parmgroup and solve job for them
+ if parms:
+ if os.path.isdir(self.filename):
+ table_name = os.path.join(self.filename, "sources.fmep")
+ else:
+ table_name = os.path.splitext(self.filename)[0] + ".fmep"
+ # make list of Subgroup objects for every non-empty subgroup
+ sgs = []
+ for sgname in subgroup_order:
+ sglist = subgroups.get(sgname, None)
+ if sglist:
+ sgs.append(Meow.ParmGroup.Subgroup(sgname, sglist))
+ # make main parm group
+ pg_src = Meow.ParmGroup.ParmGroup("source parameters", parms,
+ subgroups=sgs,
+ table_name=table_name, table_in_ms=False, bookmark=True)
+ # now make a solvejobs for the source
+ Meow.ParmGroup.SolveJob("cal_source", "Solve for source parameters", pg_src)
+ return source_model
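
The block above collects every solvable Meq.Parm into a flat parms list while also filing it under one or more named subgroups ("flux", "pos", "source:<name>", ...), preserving the order in which subgroups first appear. Below is a minimal, MeqTrees-free sketch of that bookkeeping pattern; the attribute-to-subgroup mapping and the tuple standing in for a Meq.Parm node are illustrative only.

def collect_subgroups(sources, attr_subgroup):
    """sources: list of (name, {attribute: value}) pairs.
    attr_subgroup: maps attribute name to subgroup name (cf. _Subgroups above)."""
    parms = []            # flat list of all solvable parameters
    subgroups = {}        # subgroup name -> list of parameters
    subgroup_order = []   # subgroup names in order of first appearance
    for name, attrs in sources:
        per_source = "source:%s" % name
        for attr, value in attrs.items():
            sgname = attr_subgroup.get(attr)
            if sgname is None:
                continue  # attribute is not solvable
            for key in (sgname, per_source):
                if key not in subgroups:
                    subgroups[key] = []
                    subgroup_order.append(key)
            parm = (name, attr, value)   # stand-in for a Meq.Parm node
            subgroups[sgname].append(parm)
            subgroups[per_source].append(parm)
            parms.append(parm)
    return parms, subgroups, subgroup_order

parms, sgs, order = collect_subgroups(
    [("3C147", dict(I=1.0, ra=1.495, dec=0.870))],
    {"I": "flux", "ra": "pos", "dec": "pos"})
print(order)   # e.g. ['flux', 'source:3C147', 'pos']
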
diff --git a/Tigger/Tools/FITSHeaders.py b/Tigger/Tools/FITSHeaders.py
index 22a9c00..4c426bd 100644
--- a/Tigger/Tools/FITSHeaders.py
+++ b/Tigger/Tools/FITSHeaders.py
@@ -1,24 +1,22 @@
# -*- coding: utf-8 -*-
-"""Defines various useful functions and constants for parsing FITS headers""";
-
+"""Defines various useful functions and constants for parsing FITS headers"""
# Table of Stokes parameters corresponding to Stokes axis indices
# Taken from Table 7, Greisen, E. W., and Calabretta, M. R., Astronomy & Astrophysics, 395, 1061-1075, 2002
# (http://www.aanda.org/index.php?option=article&access=bibcode&bibcode=2002A%2526A...395.1061GFUL)
# So StokesNames[1] == "I", StokesNames[-1] == "RR", StokesNames[-8] == "YX", etc.
-StokesNames = [ "","I","Q","U","V","YX","XY","YY","XX","LR","RL","LL","RR" ];
+StokesNames = ["", "I", "Q", "U", "V", "YX", "XY", "YY", "XX", "LR", "RL", "LL", "RR"]
# complex axis convention
-ComplexNames = [ "","real","imag","weight" ];
-
+ComplexNames = ["", "real", "imag", "weight"]
-def isAxisTypeX (ctype):
- """Checks if given CTYPE corresponds to the X axis""";
- return any([ ctype.startswith(prefix) for prefix in "RA","GLON","ELON","HLON","SLON" ]) or \
- ctype in ("L","X","LL","U","UU");
+def isAxisTypeX(ctype):
+ """Checks if given CTYPE corresponds to the X axis"""
+ return any([ctype.startswith(prefix) for prefix in "RA", "GLON", "ELON", "HLON", "SLON"]) or \
+ ctype in ("L", "X", "LL", "U", "UU")
-def isAxisTypeY (ctype):
- """Checks if given CTYPE corresponds to the Y axis""";
- return any([ ctype.startswith(prefix) for prefix in "DEC","GLAT","ELAT","HLAT","SLAT" ]) or \
- ctype in ("M","Y","MM","V","VV");
+def isAxisTypeY(ctype):
+ """Checks if given CTYPE corresponds to the Y axis"""
+ return any([ctype.startswith(prefix) for prefix in "DEC", "GLAT", "ELAT", "HLAT", "SLAT"]) or \
+ ctype in ("M", "Y", "MM", "V", "VV")
diff --git a/Tigger/Tools/Imaging.py b/Tigger/Tools/Imaging.py
index 990c165..c5e55e4 100644
--- a/Tigger/Tools/Imaging.py
+++ b/Tigger/Tools/Imaging.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,526 +26,545 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import Kittens.utils
-from astropy.io import fits as pyfits
import math
-import numpy
-from Tigger.Coordinates import Projection
-import FITSHeaders
+import Kittens.utils
+# init debug printing
+import Kittens.utils
+import astLib.astWCS
+import numpy
+from astropy.io import fits as pyfits
from scipy.ndimage.filters import convolve
from scipy.ndimage.interpolation import map_coordinates
-import astLib.astWCS
-# init debug printing
-import Kittens.utils
-_verbosity = Kittens.utils.verbosity(name="imaging");
-dprint = _verbosity.dprint;
-dprintf = _verbosity.dprintf;
+import FITSHeaders
+from Tigger.Coordinates import Projection
+
+_verbosity = Kittens.utils.verbosity(name="imaging")
+dprint = _verbosity.dprint
+dprintf = _verbosity.dprintf
# conversion factors from radians
-DEG = 180/math.pi;
-ARCMIN = DEG*60;
-ARCSEC = ARCMIN*60;
-FWHM = math.sqrt(math.log(256)); # which is 2.3548;
-
-def fitPsf (filename,cropsize=None):
- """Fits a Gaussian PSF to the FITS file given by 'filename'.
- If cropsize is specified, crops the central cropsize X cropsize pixels before fitting.
- Else determines cropsize by looking for the first negative sidelobe from the centre outwards.
- Returns maj_sigma,min_sigma,pa_NE (in radians)
- """;
- # read PSF from file
- psf = pyfits.open(filename)[0];
- hdr = psf.header;
- psf = psf.data;
- dprintf(2,"Read PSF of shape %s from file %s\n",psf.shape,filename);
- # remove stokes and freq axes
- if len(psf.shape) == 4:
- psf = psf[0,0,:,:];
- elif len(psf.shape) == 3:
- psf = psf[0,:,:];
- else:
- raise RuntimeError,"illegal PSF shape %s"+psf.shape;
- nx,ny = psf.shape;
- # crop the central region
- if cropsize:
- size = cropsize;
- psf = psf[(nx-size)//2:(nx+size)//2,(ny-size)//2:(ny+size)//2];
- # if size not specified, then auto-crop by looking for the first negative value starting from the center
- # this will break on very extended diagonal PSFs, but that's a pathological case
- else:
- ix = numpy.where(psf[:,ny//2]<0)[0];
- ix0 = max(ix[ix<nx//2]);
- ix1 = min(ix[ix>nx//2]);
- iy = numpy.where(psf[nx//2,:]<0)[0];
- iy0 = max(iy[iy<ny//2]);
- iy1 = min(iy[iy>ny//2]);
- print ix0,ix1,iy0,iy1;
- psf = psf[ix0:ix1,iy0:iy1];
- psf[psf<0] = 0;
-
- # estimate gaussian parameters, then fit
- import gaussfitter2
- parms0 = gaussfitter2.moments(psf,circle=0,rotate=1,vheight=0);
- print parms0;
- dprint(2,"Estimated parameters are",parms0);
- parms = gaussfitter2.gaussfit(psf,None,parms0,autoderiv=1,return_all=0,circle=0,rotate=1,vheight=0);
- dprint(0,"Fitted parameters are",parms);
-
- # now swap x and y around, since our axes are in reverse order
- ampl,y0,x0,sy,sx,rot = parms;
-
- # get pixel sizes in radians (by constructing a projection object)
- proj = Projection.FITSWCS(hdr);
- xscale,yscale = proj.xscale,proj.yscale;
-
- sx_rad = abs(sx * proj.xscale);
- sy_rad = abs(sy * proj.yscale);
- rot -= 90; # convert West through North PA into the conventional North through East
- if sx_rad < sy_rad:
- sx_rad,sy_rad = sy_rad,sx_rad;
- rot -= 90;
- rot %= 180;
-
- dprintf(1,"Fitted gaussian PSF FWHM of %f x %f pixels (%f x %f arcsec), PA %f deg\n",sx*FWHM,sy*FWHM,sx_rad*FWHM*ARCSEC,sy_rad*FWHM*ARCSEC,rot);
-
- return sx_rad,sy_rad,rot/DEG;
-
-def convolveGaussian (x1,y1,p1,x2,y2,p2):
+DEG = 180 / math.pi
+ARCMIN = DEG * 60
+ARCSEC = ARCMIN * 60
+FWHM = math.sqrt(math.log(256)); # which is 2.3548
+
+
+def fitPsf(filename, cropsize=None):
+ """Fits a Gaussian PSF to the FITS file given by 'filename'.
+ If cropsize is specified, crops the central cropsize X cropsize pixels before fitting.
+ Else determines cropsize by looking for the first negative sidelobe from the centre outwards.
+ Returns maj_sigma,min_sigma,pa_NE (in radians)
+ """
+ # read PSF from file
+ psf = pyfits.open(filename)[0]
+ hdr = psf.header
+ psf = psf.data
+ dprintf(2, "Read PSF of shape %s from file %s\n", psf.shape, filename)
+ # remove stokes and freq axes
+ if len(psf.shape) == 4:
+ psf = psf[0, 0, :, :]
+ elif len(psf.shape) == 3:
+ psf = psf[0, :, :]
+ else:
+ raise RuntimeError, "illegal PSF shape %s" + psf.shape
+ nx, ny = psf.shape
+ # crop the central region
+ if cropsize:
+ size = cropsize
+ psf = psf[(nx - size) // 2:(nx + size) // 2, (ny - size) // 2:(ny + size) // 2]
+ # if size not specified, then auto-crop by looking for the first negative value starting from the center
+ # this will break on very extended diagonal PSFs, but that's a pathological case
+ else:
+ ix = numpy.where(psf[:, ny // 2] < 0)[0]
+ ix0 = max(ix[ix < nx // 2])
+ ix1 = min(ix[ix > nx // 2])
+ iy = numpy.where(psf[nx // 2, :] < 0)[0]
+ iy0 = max(iy[iy < ny // 2])
+ iy1 = min(iy[iy > ny // 2])
+ print ix0, ix1, iy0, iy1
+ psf = psf[ix0:ix1, iy0:iy1]
+ psf[psf < 0] = 0
+
+ # estimate gaussian parameters, then fit
+ import gaussfitter2
+ parms0 = gaussfitter2.moments(psf, circle=0, rotate=1, vheight=0)
+ print parms0
+ dprint(2, "Estimated parameters are", parms0)
+ parms = gaussfitter2.gaussfit(psf, None, parms0, autoderiv=1, return_all=0, circle=0, rotate=1, vheight=0)
+ dprint(0, "Fitted parameters are", parms)
+
+ # now swap x and y around, since our axes are in reverse order
+ ampl, y0, x0, sy, sx, rot = parms
+
+ # get pixel sizes in radians (by constructing a projection object)
+ proj = Projection.FITSWCS(hdr)
+ xscale, yscale = proj.xscale, proj.yscale
+
+ sx_rad = abs(sx * proj.xscale)
+ sy_rad = abs(sy * proj.yscale)
+ rot -= 90; # convert West through North PA into the conventional North through East
+ if sx_rad < sy_rad:
+ sx_rad, sy_rad = sy_rad, sx_rad
+ rot -= 90
+ rot %= 180
+
+ dprintf(1, "Fitted gaussian PSF FWHM of %f x %f pixels (%f x %f arcsec), PA %f deg\n", sx * FWHM, sy * FWHM,
+ sx_rad * FWHM * ARCSEC, sy_rad * FWHM * ARCSEC, rot)
+
+ return sx_rad, sy_rad, rot / DEG
+
+
+def convolveGaussian(x1, y1, p1, x2, y2, p2):
"""convolves a Gaussian with extents x1,y1 and position angle p1
with another Gaussian given by x2,y2,p2, and returns the extents
and angle of the resulting Gaussian."""
# convert to Fourier plane extents, FT transforms a -> pi^2/a
- u1,v1,u2,v2 = [ (math.pi**2)*2*a**2 for a in x1,y1,x2,y2 ];
-# print "uv coeffs",u1,v1,u2,v2;
- c1,s1 = math.cos(p1),math.sin(p1);
- c2,s2 = math.cos(p2),math.sin(p2);
+ u1, v1, u2, v2 = [(math.pi ** 2) * 2 * a ** 2 for a in x1, y1, x2, y2]
+ # print "uv coeffs",u1,v1,u2,v2
+ c1, s1 = math.cos(p1), math.sin(p1)
+ c2, s2 = math.cos(p2), math.sin(p2)
# in the FT, this is a product of two Gaussians, each of the form:
# exp(-( u*(cx+sy)^2 + v*(cx-sy)^2))
# note how we rotate BACK through the position angle
# The product is necessarily a Gaussian itself, of the form
# exp(-(a.u^2+2b.u.v+c.v^2))
# So we just need to collect the rotated Gaussian coefficients into a, b and c
- a = u1*c1**2+v1*s1**2+u2*c2**2+v2*s2**2
- c = u1*s1**2+v1*c1**2+u2*s2**2+v2*c2**2
- b = c1*s1*(u1-v1)+c2*s2*(u2-v2)
-# print "a,b,c",a,b,c;
+ a = u1 * c1 ** 2 + v1 * s1 ** 2 + u2 * c2 ** 2 + v2 * s2 ** 2
+ c = u1 * s1 ** 2 + v1 * c1 ** 2 + u2 * s2 ** 2 + v2 * c2 ** 2
+ b = c1 * s1 * (u1 - v1) + c2 * s2 * (u2 - v2)
+ # print "a,b,c",a,b,c
# ok, find semi-major axes a1, b1 using the formula from http://mathworld.wolfram.com/Ellipse.html eq. 21-22
# to go from a general quadratic curve (with a,b,c given above, d=f=0, g=-1) to semi-axes a',b'
- D = math.sqrt((a-c)**2+4*b**2)
- E = a+c
- a1 = math.sqrt(2/(E-D))
- b1 = math.sqrt(2/(E+D))
-# print "a',b'",a1,b1,"coeffs",1/(a1**2),1/(b1**2)
+ D = math.sqrt((a - c) ** 2 + 4 * b ** 2)
+ E = a + c
+ a1 = math.sqrt(2 / (E - D))
+ b1 = math.sqrt(2 / (E + D))
+ # print "a',b'",a1,b1,"coeffs",1/(a1**2),1/(b1**2)
# and derive rotation angle
if b:
- p1 = math.atan2(2*b,a-c)/2 + math.pi/2
-# if a > c:
-# p1 += math.pi/2
+ p1 = math.atan2(2 * b, a - c) / 2 + math.pi / 2
+ # if a > c:
+ # p1 += math.pi/2
else:
- p1 = 0 if a <= c else math.pi/2
-# print "rotation",p1/DEG
+ p1 = 0 if a <= c else math.pi / 2
+ # print "rotation",p1/DEG
# ok, convert a1,b1 from uv-plane to image plane
- x1 = math.sqrt(1/(2*math.pi**2*a1**2))
- y1 = math.sqrt(1/(2*math.pi**2*b1**2))
+ x1 = math.sqrt(1 / (2 * math.pi ** 2 * a1 ** 2))
+ y1 = math.sqrt(1 / (2 * math.pi ** 2 * b1 ** 2))
# note that because of reciprocality, y1 becomes the major axis and x1 the minor axis, so adjust for that
- return y1,x1,(p1-math.pi/2)%math.pi;
-
-def getImageCube (fitshdu,filename="",extra_axes=None):
- """Converts a FITS HDU (consisting of a header and data) into a 4+-dim numpy array where the
- first two axes are x and y, the third is Stokes (possibly of length 1, if missing in the
- original image), and the rest are either as found in the FITS header (if extra_axes=None),
- or in the order specified by CTYPE in extra_axes (if present, else a dummy axis of size 1 is inserted),
- with axes not present in extra_axes removed by taking the 0-th plane along each.
- Returns tuple of
- array,stokes_list,extra_axes_ctype_list,removed_axes_ctype_list
- e.g. array,("I","Q"),("FREQ--FOO","TIME--BAR")
- """
- hdr = fitshdu.header;
- data = fitshdu.data;
- # recognized axes
- ix = iy = istokes = None;
- naxis = len(data.shape);
- # other axes which will be returned
- other_axes = [];
- other_axes_ctype = [];
- remove_axes = [];
- remove_axes_ctype = [];
- # match axis ctype
- # this makes FREQ equivalent to FELO*
- def match_ctype (ctype,ctype_list):
- for i,ct in enumerate(ctype_list):
- if ct == ctype or ( ct == "FREQ" and ctype.startswith("FELO") ) or ( ctype == "FREQ" and ct.startswith("FELO") ):
- return i;
- return None;
- # identify X, Y and stokes axes
- for n in range(naxis):
- iax = naxis-1-n;
- axs = str(n+1);
- ctype = hdr.get('CTYPE'+axs).strip().upper();
- if ix is None and FITSHeaders.isAxisTypeX(ctype):
- ix = iax; # in numpy order, axes are reversed
- elif iy is None and FITSHeaders.isAxisTypeY(ctype):
- iy = iax;
- elif ctype == 'STOKES':
- if istokes is not None:
- raise ValueError,"duplicate STOKES axis in FITS file %s"%filename;
- istokes = iax;
- crval = hdr.get('CRVAL'+axs,0);
- cdelt = hdr.get('CDELT'+axs,1);
- crpix = hdr.get('CRPIX'+axs,1)-1;
- values = map(int,list(crval + (numpy.arange(data.shape[iax]) - crpix)*cdelt));
- stokes_names = [ (FITSHeaders.StokesNames[i]
- if i>0 and i<len(FITSHeaders.StokesNames) else "%d"%i) for i in values ];
- if tx1 > tl[-1] or tx2 < tl[0] or ty1 > tm[-1] or ty2 < tm[0]:
- self._target_slice = None,None;
- return;
- tx1 = max(0,int(math.floor(tx1)));
- tx2 = min(len(tl),int(math.floor(tx2+1)));
- ty1 = max(0,int(math.floor(ty1)));
- ty2 = min(len(tm),int(math.floor(ty2+1)));
- tl = tl[tx1:tx2];
- tm = tm[ty1:ty2];
- dprint(4,"overlap target pixels are %d:%d and %d:%d"%(tx1,tx2,ty1,ty2));
-
- #### The code below works but can be very slow (~minutes) when doing large images, because of WCS
- ## make target lm matrix
- #tmat = numpy.zeros((2,len(tl),len(tm)));
- #tmat[0,...] = tl[:,numpy.newaxis];
- #tmat[1,...] = tm[numpy.newaxis,:];
- ## convert this to radec. Go through list since that's what Projection expects
- #dprint(4,"converting %d target l/m pixel coordinates to radec"%(len(tl)*len(tm)));
- #ra,dec = tproj.radec(tmat[0,...].ravel(),tmat[1,...].ravel())
- #dprint(4,"converting radec to source l/m");
- #tls,tms = sproj.lm(ra,dec);
- #tmat[0,...] = tls.reshape((len(tl),len(tm)));
- #tmat[1,...] = tms.reshape((len(tl),len(tm)));
-
- #### my alternative conversion code
- ## source to target is always an affine transform (one image projected into the plane of another, right?), so
- ## use WCS to map the corners, and figure out a linear transform from there
-
- # this maps three corners
- t00 = sproj.lm(*tproj.radec(tl[0],tm[0]));
- t1x = sproj.lm(*tproj.radec(tl[-1],tm[0]));
- t1y = sproj.lm(*tproj.radec(tl[0],tm[-1]));
-
- tmat = numpy.zeros((2,len(tl),len(tm)));
- tlnorm = (tl-tl[0])/(tl[-1]-tl[0]);
- tmnorm = (tm-tm[0])/(tm[-1]-tm[0]);
- tmat[0,...] = t00[0] + (tlnorm*(t1x[0]-t00[0]))[:,numpy.newaxis] + (tmnorm*(t1y[0]-t00[0]))[numpy.newaxis,:];
- tmat[1,...] = t00[1] + (tmnorm*(t1y[1]-t00[1]))[numpy.newaxis,:] + (tlnorm*(t1x[1]-t00[1]))[:,numpy.newaxis];
-
- dprint(4,"setting up slices");
- # ok, now find pixels in tmat that are within the source image extent
- tmask = (sl[0]<=tmat[0,...])&(tmat[0,...]<=sl[-1])&(sm[0]<=tmat[1,...])&(tmat[1,...]<=sm[-1]);
- # find extents along target's l and m axis
- # tmask_l/m is true for each target column/row that has pixels within the source image
- tmask_l = numpy.where(tmask.sum(1)>0)[0];
- tmask_m = numpy.where(tmask.sum(0)>0)[0];
- # check if there's no overlap at all -- return then
- if not len(tmask_l) or not len(tmask_m):
- self._target_slice = None,None;
- return;
- # ok, now we know over which pixels of the target image need to be interpolated
- ix0,ix1 = tmask_l[0],tmask_l[-1]+1;
- iy0,iy1 = tmask_m[0],tmask_m[-1]+1;
- self._target_slice = slice(ix0+tx1,ix1+tx1),slice(iy0+ty1,iy1+ty1);
- dprint(4,"slices are",ix0,ix1,iy0,iy1);
- # make [2,nx,ny] array of interpolation coordinates
- self._target_coords = tmat[:,ix0:ix1,iy0:iy1];
-
- def targetSlice (self):
- return self._target_slice;
-
- def __call__ (self,image):
- if self._target_slice[0] is None:
- return 0;
+ hdr = fitshdu.header
+ data = fitshdu.data
+ # recognized axes
+ ix = iy = istokes = None
+ naxis = len(data.shape)
+ # other axes which will be returned
+ other_axes = []
+ other_axes_ctype = []
+ remove_axes = []
+ remove_axes_ctype = []
+
+ # match axis ctype
+ # this makes FREQ equivalent to FELO*
+ def match_ctype(ctype, ctype_list):
+ for i, ct in enumerate(ctype_list):
+ if ct == ctype or (ct == "FREQ" and ctype.startswith("FELO")) or (
+ ctype == "FREQ" and ct.startswith("FELO")):
+ return i
+ return None
+
+ # identify X, Y and stokes axes
+ for n in range(naxis):
+ iax = naxis - 1 - n
+ axs = str(n + 1)
+ ctype = hdr.get('CTYPE' + axs).strip().upper()
+ if ix is None and FITSHeaders.isAxisTypeX(ctype):
+ ix = iax; # in numpy order, axes are reversed
+ elif iy is None and FITSHeaders.isAxisTypeY(ctype):
+ iy = iax
+ elif ctype == 'STOKES':
+ if istokes is not None:
+ raise ValueError, "duplicate STOKES axis in FITS file %s" % filename
+ istokes = iax
+ crval = hdr.get('CRVAL' + axs, 0)
+ cdelt = hdr.get('CDELT' + axs, 1)
+ crpix = hdr.get('CRPIX' + axs, 1) - 1
+ values = map(int, list(crval + (numpy.arange(data.shape[iax]) - crpix) * cdelt))
+ stokes_names = [(FITSHeaders.StokesNames[i]
+ if i > 0 and i < len(FITSHeaders.StokesNames) else "%d" % i) for i in values]
+ else:
+ other_axes.append(iax)
+ other_axes_ctype.append(ctype)
+ # not found?
+ if ix is None or iy is None:
+ raise ValueError, "FITS file %s does not appear to contain an X and/or Y axis" % filename
+ # form up shape of resulting image, and order of axes for transpose
+ shape = [data.shape[ix], data.shape[iy]]
+ axes = [ix, iy]
+ # add stokes axis
+ if istokes is None:
+ shape.append(1)
+ stokes_names = ("I",)
else:
- return map_coordinates(image,self._target_coords);
-
-def restoreSources (fits_hdu,sources,gmaj,gmin=None,grot=0,freq=None,primary_beam=None,apply_beamgain=False,ignore_nobeam=False):
- """Restores sources (into the given FITSHDU) using a Gaussian PSF given by gmaj/gmin/grot, in radians.
- gmaj/gmin is major/minor sigma parameter; grot is PA in the North thru East convention (PA=0 is N).
-
- If gmaj=0, uses delta functions instead.
- If freq is specified, converts flux to the specified frequency.
- If primary_beam is specified, uses it to apply a PB gain to each source. This must be a function of two arguments:
- r and freq, returning the power beam gain.
- If apply_beamgain is true, applies beamgain atribute instead, if this exists.
- Source tagged 'nobeam' will not have the PB gain applied, unless ignore_nobeam=True
- """;
- hdr = fits_hdu.header;
- data,stokes,extra_data_axes,dum = getImageCube(fits_hdu);
- # create projection object, using pixel coordinates
- proj = Projection.FITSWCSpix(hdr);
- naxis = len(data.shape);
- nx = data.shape[0];
- ny = data.shape[1];
- dprintf(1,"Read image of shape %s\n",data.shape);
- # Now we make "indexer" tuples. These use the numpy.newarray index to turn elementary vectors into
- # full arrays of the same number of dimensions as 'data' (data can be 2-, 3- or 4-dimensional, so we need
- # a general solution.)
- # For e.g. a nfreq x nstokes x ny x nx array, the following objects are created:
- # x_indexer turns n-vector vx into a _,_,_,n array
- # y_indexer turns m-vector vy into a _,_,m,_ array
- # stokes_indexer turns the stokes vector into a _,nst,_,_ array
- # ...where "_" is numpy.newaxis.
- # The happy result of all this is that we can add a Gaussian into the data array at i1:i2,j1:j2 as follows:
- # 1. form up vectors of world coordinates (vx,vy) corresponding to pixel coordinates i1:i2 and j1:j2
- # 2. form up vector of Stokes parameters
- # 3. g = Gauss(vx[x_indexer],vy[y_indexer])*stokes[stokes_indexer]
- # 4. Just say data[j1:j2,i1:2,...] += g
- # This automatically expands all array dimensions as needed.
-
- # This is a helper function, returns an naxis-sized tuple, with slice(None) in the Nth
- # position, and elem_index elsewhere.
- def make_axis_indexer (n,elem_index=numpy.newaxis):
- indexer = [elem_index]*naxis;
- indexer[n] = slice(None);
- return tuple(indexer);
- x_indexer = make_axis_indexer(0);
- y_indexer = make_axis_indexer(1);
- # figure out stokes
- nstokes = len(stokes);
- stokes_vec = numpy.zeros((nstokes,));
- stokes_indexer = make_axis_indexer(2);
- dprint(2,"Stokes are",stokes);
- dprint(2,"Stokes indexing vector is",stokes_indexer);
- # get pixel sizes, in radians
- # gmaj != 0: use gaussian. Estimate PSF box size. We want a +/-5 sigma box
- if gmaj > 0:
- # convert grot from N-E to W-N (which is the more conventional mathematical definition of these things), so X is major axis
- grot += math.pi/2;
- if gmin == 0:
- gmin = gmaj;
- cos_rot = math.cos(grot);
- sin_rot = math.sin(-grot); # rotation is N->E, so swap the sign
- else:
- gmaj = gmin = grot = 0;
- conv_kernels = {};
- # loop over sources in model
- for src in sources:
- # get normalized intensity, if spectral info is available
- if freq is not None and getattr(src,'spectrum',None):
- ni = src.spectrum.normalized_intensity(freq);
- dprintf(3,"Source %s: normalized spectral intensity is %f\n",src.name,ni);
+ shape.append(data.shape[istokes])
+ axes.append(istokes)
+ if extra_axes:
+ # if a fixed order for the extra axes is specified, add the ones we found
+ for ctype in extra_axes:
+ i = match_ctype(ctype, other_axes_ctype)
+ if i is not None:
+ iax = other_axes[i]
+ axes.append(iax)
+ shape.append(data.shape[iax])
+ else:
+ shape.append(1)
+ # add the ones that were not found into the remove list
+ for iaxis, ctype in zip(other_axes, other_axes_ctype):
+ if match_ctype(ctype, extra_axes) is None:
+ axes.append(iaxis)
+ remove_axes.append(iaxis)
+ remove_axes_ctype.append(ctype)
+ # return all extra axes found in header
else:
- ni = 1;
- # multiply that by PB gain, if given
- if ignore_nobeam or not getattr(src,'nobeam',False):
- if apply_beamgain and hasattr(src,'beamgain'):
- ni *= getattr(src,'beamgain');
- elif primary_beam:
- r = getattr(src,'r',None);
- if r is not None:
- pb = primary_beam(r,freq);
- ni *= pb;
- dprintf(3,"Source %s: r=%g pb=%f, normalized intensity is %f\n",src.name,r,pb,ni);
- # process point sources
- if src.typecode in ('pnt','Gau'):
- # pixel coordinates of source
- xsrc,ysrc = proj.lm(src.pos.ra,src.pos.dec);
- # form up stokes vector
- for i,st in enumerate(stokes):
- stokes_vec[i] = getattr(src.flux,st,0)*ni;
- dprintf(3,"Source %s, %s Jy, at pixel %f,%f\n",src.name,stokes_vec,xsrc,ysrc);
- # for gaussian sources, convolve with beam
- if src.typecode == 'Gau':
- pa0 = src.shape.pa+math.pi/2; # convert PA from N->E to conventional W->N
- ex0,ey0 = src.shape.ex/FWHM,src.shape.ey/FWHM; # convert extents from FWHM to sigmas, since gmaj/gmin is in same scale
- if gmaj > 0:
- ex,ey,pa = convolveGaussian(ex0,ey0,pa0,gmaj,gmin,grot);
- # normalize flux by beam/extent ratio
- stokes_vec *= (gmaj*gmin)/(ex*ey);
- #print "%3dx%-3d@%3d * %3dx%-3d@%3d -> %3dx%-3d@%3d"%(
- #ex0 *FWHM*ARCSEC,ey0 *FWHM*ARCSEC,(pa0-math.pi/2)*DEG,
- #gmaj*FWHM*ARCSEC,gmin*FWHM*ARCSEC,(grot-math.pi/2)*DEG,
- #ex *FWHM*ARCSEC,ey *FWHM*ARCSEC,(pa-math.pi/2)*DEG);
+ shape += [data.shape[i] for i in other_axes]
+ axes += other_axes
+ extra_axes = other_axes_ctype
+ # transpose
+ data = data.transpose(axes)
+ # trim off axes which are to be removed, if we have any
+ if remove_axes:
+ data = data[[Ellipsis] + [0] * len(remove_axes)]
+ # reshape and return
+ return data.reshape(shape), stokes_names, extra_axes, remove_axes_ctype
+
+
+class ImageResampler(object):
+ """This class resamples images from one projection ("source") to another ("target")."""
+
+ def __init__(self, sproj, tproj, sl, sm, tl, tm):
+ """Creates resampler.
+ sproj,tproj are the source and target Projection objects.
+ sl,sm is a (sorted, ascending) list of l,m coordinates in the source image
+ tl,tm is a (sorted, ascending) list of l,m coordinates in the target image
+ """
+ # convert tl,tm to source coordinates
+ # find the overlap region first, to keep the number of coordinate conversions to a minimum
+ overlap = astLib.astWCS.findWCSOverlap(sproj.wcs, tproj.wcs)
+ tx2, tx1, ty1, ty2 = overlap['wcs2Pix']
+ # no overlap? stop then
+ if tx1 > tl[-1] or tx2 < tl[0] or ty1 > tm[-1] or ty2 < tm[0]:
+ self._target_slice = None, None
+ return
+ tx1 = max(0, int(math.floor(tx1)))
+ tx2 = min(len(tl), int(math.floor(tx2 + 1)))
+ ty1 = max(0, int(math.floor(ty1)))
+ ty2 = min(len(tm), int(math.floor(ty2 + 1)))
+ tl = tl[tx1:tx2]
+ tm = tm[ty1:ty2]
+ dprint(4, "overlap target pixels are %d:%d and %d:%d" % (tx1, tx2, ty1, ty2))
+
+ #### The code below works but can be very slow (~minutes) when doing large images, because of WCS
+ ## make target lm matrix
+ # tmat = numpy.zeros((2,len(tl),len(tm)))
+ # tmat[0,...] = tl[:,numpy.newaxis]
+ # tmat[1,...] = tm[numpy.newaxis,:]
+ ## convert this to radec. Go through list since that's what Projection expects
+ # dprint(4,"converting %d target l/m pixel coordinates to radec"%(len(tl)*len(tm)))
+ # ra,dec = tproj.radec(tmat[0,...].ravel(),tmat[1,...].ravel())
+ # dprint(4,"converting radec to source l/m")
+ # tls,tms = sproj.lm(ra,dec)
+ # tmat[0,...] = tls.reshape((len(tl),len(tm)))
+ # tmat[1,...] = tms.reshape((len(tl),len(tm)))
+
+ #### my alternative conversion code
+ ## source to target is always an affine transform (one image projected into the plane of another, right?), so
+ ## use WCS to map the corners, and figure out a linear transform from there
+
+ # this maps three corners
+ t00 = sproj.lm(*tproj.radec(tl[0], tm[0]))
+ t1x = sproj.lm(*tproj.radec(tl[-1], tm[0]))
+ t1y = sproj.lm(*tproj.radec(tl[0], tm[-1]))
+
+ tmat = numpy.zeros((2, len(tl), len(tm)))
+ tlnorm = (tl - tl[0]) / (tl[-1] - tl[0])
+ tmnorm = (tm - tm[0]) / (tm[-1] - tm[0])
+ tmat[0, ...] = t00[0] + (tlnorm * (t1x[0] - t00[0]))[:, numpy.newaxis] + (tmnorm * (t1y[0] - t00[0]))[
+ numpy.newaxis, :]
+ tmat[1, ...] = t00[1] + (tmnorm * (t1y[1] - t00[1]))[numpy.newaxis, :] + (tlnorm * (t1x[1] - t00[1]))[:,
+ numpy.newaxis]
+
+ dprint(4, "setting up slices")
+ # ok, now find pixels in tmat that are within the source image extent
+ tmask = (sl[0] <= tmat[0, ...]) & (tmat[0, ...] <= sl[-1]) & (sm[0] <= tmat[1, ...]) & (tmat[1, ...] <= sm[-1])
+ # find extents along target's l and m axis
+ # tmask_l/m is true for each target column/row that has pixels within the source image
+ tmask_l = numpy.where(tmask.sum(1) > 0)[0]
+ tmask_m = numpy.where(tmask.sum(0) > 0)[0]
+ # check if there's no overlap at all -- return then
+ if not len(tmask_l) or not len(tmask_m):
+ self._target_slice = None, None
+ return
+ # ok, now we know which pixels of the target image need to be interpolated
+ ix0, ix1 = tmask_l[0], tmask_l[-1] + 1
+ iy0, iy1 = tmask_m[0], tmask_m[-1] + 1
+ self._target_slice = slice(ix0 + tx1, ix1 + tx1), slice(iy0 + ty1, iy1 + ty1)
+ dprint(4, "slices are", ix0, ix1, iy0, iy1)
+ # make [2,nx,ny] array of interpolation coordinates
+ self._target_coords = tmat[:, ix0:ix1, iy0:iy1]
+
+ def targetSlice(self):
+ return self._target_slice
+
+ def __call__(self, image):
+ if self._target_slice[0] is None:
+ return 0
else:
- # normalize flux by pixel/extent ratio
- ex,ey,pa = ex0,ey0,pa0;
- stokes_vec *= (abs(proj.xscale*proj.yscale))/(ex*ey);
- else:
- ex,ey,pa = gmaj,gmin,grot;
- # gmaj != 0: use gaussian.
- if ex > 0 or ey > 0:
- # work out restoring box
- box_radius = 5*(max(ex,ey))/min(abs(proj.xscale),abs(proj.yscale));
- dprintf(2,"Will use a box of radius %f pixels for restoration\n",box_radius);
- cos_pa = math.cos(pa);
- sin_pa = math.sin(-pa); # rotation is N->E, so swap the sign
- # pixel coordinates of box around source in which we evaluate the gaussian
- i1 = max(0,int(math.floor(xsrc-box_radius)));
- i2 = min(nx,int(math.ceil(xsrc+box_radius)));
- j1 = max(0,int(math.floor(ysrc-box_radius)));
- j2 = min(ny,int(math.ceil(ysrc+box_radius)));
- # skip sources if box doesn't overlap image
- if i1>=i2 or j1>=j2:
- continue;
- # now we convert pixel indices within the box into world coordinates, relative to source position
- xi = (numpy.arange(i1,i2) - xsrc)*proj.xscale;
- yj = (numpy.arange(j1,j2) - ysrc)*proj.yscale;
- # work out rotated coordinates
- xi1 = (xi*cos_pa)[x_indexer] - (yj*sin_pa)[y_indexer];
- yj1 = (xi*sin_pa)[x_indexer] + (yj*cos_pa)[y_indexer];
- # evaluate gaussian at these, scale up by stokes vector
- gg = stokes_vec[stokes_indexer]*numpy.exp(-((xi1/ex)**2+(yj1/ey)**2)/2.);
- # add into data
- data[i1:i2,j1:j2,...] += gg;
- # else gmaj=0: use delta functions
- else:
- xsrc = int(round(xsrc));
- ysrc = int(round(ysrc));
- # skip sources outside image
- if xsrc < 0 or xsrc >= nx or ysrc < 0 or ysrc >= ny:
- continue;
- xdum = numpy.array([1]);
- ydum = numpy.array([1]);
- data[xsrc:xsrc+1,ysrc:ysrc+1,...] += stokes_vec[stokes_indexer]*xdum[x_indexer]*ydum[y_indexer];
- # process model images -- convolve with PSF and add to data
- elif src.typecode == "FITS":
- modelff = pyfits.open(src.shape.filename);
- model,model_stokes,extra_model_axes,removed_model_axes = \
- getImageCube(modelff[0],src.shape.filename,extra_axes=extra_data_axes);
- modelproj = Projection.FITSWCSpix(modelff[0].header);
- # map Stokes planes: at least the first one ("I", presumably) must be present
- # The rest are represented by indices in model_stp. Thus e.g. for an IQUV data image and an IV model,
- # model_stp will be [0,-1,-1,1]
- model_stp = [ (model_stokes.index(st) if st in model_stokes else -1) for st in stokes ];
- if model_stp[0] < 0:
- print "Warning: model image %s lacks Stokes %s, skipping."%(src.shape.filename,model_stokes[0]);
- continue;
- # figure out whether the images overlap at all
- # in the trivial case, both images have the same WCS, so no resampling is needed
- if model.shape[:2] == data.shape[:2] and modelproj == proj:
- model_resampler = lambda x:x;
- data_x_slice = data_y_slice = slice(None);
- dprintf(3,"Source %s: same resolution as output, no interpolation needed\n",src.shape.filename);
- # else make a resampler engine
- else:
- model_resampler = ImageResampler(modelproj,proj,
- numpy.arange(model.shape[0],dtype=float),numpy.arange(model.shape[1],dtype=float),
- numpy.arange(data.shape[0],dtype=float),numpy.arange(data.shape[1],dtype=float));
- data_x_slice,data_y_slice = model_resampler.targetSlice();
- dprintf(3,"Source %s: resampling into image at %s, %s\n",src.shape.filename,data_x_slice,data_y_slice);
- # skip this source if no overlap
- if data_x_slice is None or data_y_slice is None:
- continue;
- # warn about ignored model axes (e.g. when model has frequency and our output doesn't)
- if removed_model_axes:
- print "Warning: model image %s has one or more axes that are not present in the output image:"%src.shape.filename;
- print " taking the first plane along (%s)."%(",".join(removed_model_axes));
- # evaluate convolution kernel for this model scale, if not already cached
- conv_kernel = conv_kernels.get((modelproj.xscale,modelproj.yscale),None);
- if conv_kernel is None:
- box_radius = 5*(max(gmaj,gmin))/min(abs(modelproj.xscale),abs(modelproj.yscale));
- radius = int(round(box_radius));
- # convert pixel coordinates into world coordinates relative to 0
- xi = numpy.arange(-radius,radius+1)*modelproj.xscale
- yj = numpy.arange(-radius,radius+1)*modelproj.yscale
- # work out rotated coordinates
- xi1 = (xi*cos_rot)[:,numpy.newaxis] - (yj*sin_rot)[numpy.newaxis,:];
- yj1 = (xi*sin_rot)[:,numpy.newaxis] + (yj*cos_rot)[numpy.newaxis,:];
- # evaluate convolution kernel
- conv_kernel = numpy.exp(-((xi1/gmaj)**2+(yj1/gmin)**2)/2.);
- conv_kernels[modelproj.xscale,modelproj.yscale] = conv_kernel;
- # Work out data slices that we need to loop over.
- # For every 2D slice in the data image cube (assuming other axes besides x/y), we need to apply a
- # convolution to the corresponding model slice, and add it in to the data slice. The complication
- # is that any extra axis may be of length 1 in the model and of length N in the data (e.g. frequency axis),
- # in which case we need to add the same model slice to all N data slices. The loop below puts together a series
- # of index tuples representing each per-slice operation.
- # These two initial slices correspond to the x/y axes. Additional indices will be appended to these in a loop
- slices0 = [([data_x_slice,data_y_slice],[slice(None),slice(None)])];
- # work out Stokes axis
- sd0 = [data_x_slice,data_y_slice];
- sm0 = [slice(None),slice(None)];
- slices = [];
- slices = [ (sd0+[dst],sm0+[mst]) for dst,mst in enumerate(model_stp) if mst >= 0 ];
- #for dst,mst in enumerate(model_stp):
- #if mst >= 0:
- #slices = [ (sd0+[dst],sm0+[mst]) for sd0,sm0 in slices ];
- # now loop over extra axes
- for axis in range(3,len(extra_data_axes)+3):
- # list of data image indices to iterate over for this axis, 0...N-1
- indices = [[x] for x in range(data.shape[axis])];
- # list of model image indices to iterate over
- if model.shape[axis] == 1:
- model_indices = [[0]]*len(indices);
- # shape-n: must be same as data, in which case 0..N-1 is assigned to 0..N-1
- elif model.shape[axis] == data.shape[axis]:
- model_indices = indices;
- # else error
+ return map_coordinates(image, self._target_coords)
+
+
+def restoreSources(fits_hdu, sources, gmaj, gmin=None, grot=0, freq=None, primary_beam=None, apply_beamgain=False,
+ ignore_nobeam=False):
+ """Restores sources (into the given FITSHDU) using a Gaussian PSF given by gmaj/gmin/grot, in radians.
+ gmaj/gmin is major/minor sigma parameter; grot is PA in the North thru East convention (PA=0 is N).
+
+ If gmaj=0, uses delta functions instead.
+ If freq is specified, converts flux to the specified frequency.
+ If primary_beam is specified, uses it to apply a PB gain to each source. This must be a function of two arguments:
+ r and freq, returning the power beam gain.
+ If apply_beamgain is true, applies the beamgain attribute instead, if this exists.
+ Sources tagged 'nobeam' will not have the PB gain applied, unless ignore_nobeam=True
+ """
+ hdr = fits_hdu.header
+ data, stokes, extra_data_axes, dum = getImageCube(fits_hdu)
+ # create projection object, using pixel coordinates
+ proj = Projection.FITSWCSpix(hdr)
+ naxis = len(data.shape)
+ nx = data.shape[0]
+ ny = data.shape[1]
+ dprintf(1, "Read image of shape %s\n", data.shape)
+
+ # Now we make "indexer" tuples. These use the numpy.newarray index to turn elementary vectors into
+ # full arrays of the same number of dimensions as 'data' (data can be 2-, 3- or 4-dimensional, so we need
+ # a general solution.)
+ # For e.g. a nfreq x nstokes x ny x nx array, the following objects are created:
+ # x_indexer turns n-vector vx into a _,_,_,n array
+ # y_indexer turns m-vector vy into a _,_,m,_ array
+ # stokes_indexer turns the stokes vector into a _,nst,_,_ array
+ # ...where "_" is numpy.newaxis.
+ # The happy result of all this is that we can add a Gaussian into the data array at i1:i2,j1:j2 as follows:
+ # 1. form up vectors of world coordinates (vx,vy) corresponding to pixel coordinates i1:i2 and j1:j2
+ # 2. form up vector of Stokes parameters
+ # 3. g = Gauss(vx[x_indexer],vy[y_indexer])*stokes[stokes_indexer]
+ # 4. Just say data[j1:j2,i1:i2,...] += g
+ # This automatically expands all array dimensions as needed.
+
+ # This is a helper function, returns an naxis-sized tuple, with slice(None) in the Nth
+ # position, and elem_index elsewhere.
+ def make_axis_indexer(n, elem_index=numpy.newaxis):
+ indexer = [elem_index] * naxis
+ indexer[n] = slice(None)
+ return tuple(indexer)
+
+ x_indexer = make_axis_indexer(0)
+ y_indexer = make_axis_indexer(1)
+ # figure out stokes
+ nstokes = len(stokes)
+ stokes_vec = numpy.zeros((nstokes,))
+ stokes_indexer = make_axis_indexer(2)
+ dprint(2, "Stokes are", stokes)
+ dprint(2, "Stokes indexing vector is", stokes_indexer)
+ # get pixel sizes, in radians
+ # gmaj != 0: use gaussian. Estimate PSF box size. We want a +/-5 sigma box
+ if gmaj > 0:
+ # convert grot from N-E to W-N (which is the more conventional mathematical definition of these things), so X is major axis
+ grot += math.pi / 2
+ if gmin == 0:
+ gmin = gmaj
+ cos_rot = math.cos(grot)
+ sin_rot = math.sin(-grot); # rotation is N->E, so swap the sign
+ else:
+ gmaj = gmin = grot = 0
+ conv_kernels = {}
+ # loop over sources in model
+ for src in sources:
+ # get normalized intensity, if spectral info is available
+ if freq is not None and getattr(src, 'spectrum', None):
+ ni = src.spectrum.normalized_intensity(freq)
+ dprintf(3, "Source %s: normalized spectral intensity is %f\n", src.name, ni)
else:
- raise RuntimeError,"axis %s of model image %s doesn't match that of output image"%\
- (extra_data_axes[axis-3],src.shape.filename);
- # update list of slices
- slices =[ (sd0+sd,si0+si) for sd0,si0 in slices for sd,si in zip(indices,model_indices) ];
- # now loop over slices and assign
- for sd,si in slices:
- conv = convolve(model[tuple(si)],conv_kernel);
- data[tuple(sd)] += model_resampler(conv);
- ## for debugging these are handy:
- #data[0:conv.shape[0],0:conv.shape[1],0,0] = conv;
- #data[0:conv_kernel.shape[0],-conv_kernel.shape[1]:,0,0] = conv_kernel;
+ ni = 1
+ # multiply that by PB gain, if given
+ if ignore_nobeam or not getattr(src, 'nobeam', False):
+ if apply_beamgain and hasattr(src, 'beamgain'):
+ ni *= getattr(src, 'beamgain')
+ elif primary_beam:
+ r = getattr(src, 'r', None)
+ if r is not None:
+ pb = primary_beam(r, freq)
+ ni *= pb
+ dprintf(3, "Source %s: r=%g pb=%f, normalized intensity is %f\n", src.name, r, pb, ni)
+ # process point sources
+ if src.typecode in ('pnt', 'Gau'):
+ # pixel coordinates of source
+ xsrc, ysrc = proj.lm(src.pos.ra, src.pos.dec)
+ # form up stokes vector
+ for i, st in enumerate(stokes):
+ stokes_vec[i] = getattr(src.flux, st, 0) * ni
+ dprintf(3, "Source %s, %s Jy, at pixel %f,%f\n", src.name, stokes_vec, xsrc, ysrc)
+ # for gaussian sources, convolve with beam
+ if src.typecode == 'Gau':
+ pa0 = src.shape.pa + math.pi / 2; # convert PA from N->E to conventional W->N
+ ex0, ey0 = src.shape.ex / FWHM, src.shape.ey / FWHM; # convert extents from FWHM to sigmas, since gmaj/gmin is in same scale
+ if gmaj > 0:
+ ex, ey, pa = convolveGaussian(ex0, ey0, pa0, gmaj, gmin, grot)
+ # normalize flux by beam/extent ratio
+ stokes_vec *= (gmaj * gmin) / (ex * ey)
+ # print "%3dx%-3d@%3d * %3dx%-3d@%3d -> %3dx%-3d@%3d"%(
+ # ex0 *FWHM*ARCSEC,ey0 *FWHM*ARCSEC,(pa0-math.pi/2)*DEG,
+ # gmaj*FWHM*ARCSEC,gmin*FWHM*ARCSEC,(grot-math.pi/2)*DEG,
+ # ex *FWHM*ARCSEC,ey *FWHM*ARCSEC,(pa-math.pi/2)*DEG)
+ else:
+ # normalize flux by pixel/extent ratio
+ ex, ey, pa = ex0, ey0, pa0
+ stokes_vec *= (abs(proj.xscale * proj.yscale)) / (ex * ey)
+ else:
+ ex, ey, pa = gmaj, gmin, grot
+ # gmaj != 0: use gaussian.
+ if ex > 0 or ey > 0:
+ # work out restoring box
+ box_radius = 5 * (max(ex, ey)) / min(abs(proj.xscale), abs(proj.yscale))
+ dprintf(2, "Will use a box of radius %f pixels for restoration\n", box_radius)
+ cos_pa = math.cos(pa)
+ sin_pa = math.sin(-pa); # rotation is N->E, so swap the sign
+ # pixel coordinates of box around source in which we evaluate the gaussian
+ i1 = max(0, int(math.floor(xsrc - box_radius)))
+ i2 = min(nx, int(math.ceil(xsrc + box_radius)))
+ j1 = max(0, int(math.floor(ysrc - box_radius)))
+ j2 = min(ny, int(math.ceil(ysrc + box_radius)))
+ # skip sources if box doesn't overlap image
+ if i1 >= i2 or j1 >= j2:
+ continue
+ # now we convert pixel indices within the box into world coordinates, relative to source position
+ xi = (numpy.arange(i1, i2) - xsrc) * proj.xscale
+ yj = (numpy.arange(j1, j2) - ysrc) * proj.yscale
+ # work out rotated coordinates
+ xi1 = (xi * cos_pa)[x_indexer] - (yj * sin_pa)[y_indexer]
+ yj1 = (xi * sin_pa)[x_indexer] + (yj * cos_pa)[y_indexer]
+ # evaluate gaussian at these, scale up by stokes vector
+ gg = stokes_vec[stokes_indexer] * numpy.exp(-((xi1 / ex) ** 2 + (yj1 / ey) ** 2) / 2.)
+ # add into data
+ data[i1:i2, j1:j2, ...] += gg
+ # else gmaj=0: use delta functions
+ else:
+ xsrc = int(round(xsrc))
+ ysrc = int(round(ysrc))
+ # skip sources outside image
+ if xsrc < 0 or xsrc >= nx or ysrc < 0 or ysrc >= ny:
+ continue
+ xdum = numpy.array([1])
+ ydum = numpy.array([1])
+ data[xsrc:xsrc + 1, ysrc:ysrc + 1, ...] += stokes_vec[stokes_indexer] * xdum[x_indexer] * ydum[
+ y_indexer]
+ # process model images -- convolve with PSF and add to data
+ elif src.typecode == "FITS":
+ modelff = pyfits.open(src.shape.filename)
+ model, model_stokes, extra_model_axes, removed_model_axes = \
+ getImageCube(modelff[0], src.shape.filename, extra_axes=extra_data_axes)
+ modelproj = Projection.FITSWCSpix(modelff[0].header)
+ # map Stokes planes: at least the first one ("I", presumably) must be present
+ # The rest are represented by indices in model_stp. Thus e.g. for an IQUV data image and an IV model,
+ # model_stp will be [0,-1,-1,1]
+ model_stp = [(model_stokes.index(st) if st in model_stokes else -1) for st in stokes]
+ if model_stp[0] < 0:
+ print "Warning: model image %s lacks Stokes %s, skipping." % (src.shape.filename, model_stokes[0])
+ continue
+ # figure out whether the images overlap at all
+ # in the trivial case, both images have the same WCS, so no resampling is needed
+ if model.shape[:2] == data.shape[:2] and modelproj == proj:
+ model_resampler = lambda x: x
+ data_x_slice = data_y_slice = slice(None)
+ dprintf(3, "Source %s: same resolution as output, no interpolation needed\n", src.shape.filename)
+ # else make a resampler engine
+ else:
+ model_resampler = ImageResampler(modelproj, proj,
+ numpy.arange(model.shape[0], dtype=float),
+ numpy.arange(model.shape[1], dtype=float),
+ numpy.arange(data.shape[0], dtype=float),
+ numpy.arange(data.shape[1], dtype=float))
+ data_x_slice, data_y_slice = model_resampler.targetSlice()
+ dprintf(3, "Source %s: resampling into image at %s, %s\n", src.shape.filename, data_x_slice,
+ data_y_slice)
+ # skip this source if no overlap
+ if data_x_slice is None or data_y_slice is None:
+ continue
+ # warn about ignored model axes (e.g. when model has frequency and our output doesn't)
+ if removed_model_axes:
+ print "Warning: model image %s has one or more axes that are not present in the output image:" % src.shape.filename
+ print " taking the first plane along (%s)." % (",".join(removed_model_axes))
+ # evaluate convolution kernel for this model scale, if not already cached
+ conv_kernel = conv_kernels.get((modelproj.xscale, modelproj.yscale), None)
+ if conv_kernel is None:
+ box_radius = 5 * (max(gmaj, gmin)) / min(abs(modelproj.xscale), abs(modelproj.yscale))
+ radius = int(round(box_radius))
+ # convert pixel coordinates into world coordinates relative to 0
+ xi = numpy.arange(-radius, radius + 1) * modelproj.xscale
+ yj = numpy.arange(-radius, radius + 1) * modelproj.yscale
+ # work out rotated coordinates
+ xi1 = (xi * cos_rot)[:, numpy.newaxis] - (yj * sin_rot)[numpy.newaxis, :]
+ yj1 = (xi * sin_rot)[:, numpy.newaxis] + (yj * cos_rot)[numpy.newaxis, :]
+ # evaluate convolution kernel
+ conv_kernel = numpy.exp(-((xi1 / gmaj) ** 2 + (yj1 / gmin) ** 2) / 2.)
+ conv_kernels[modelproj.xscale, modelproj.yscale] = conv_kernel
+ # Work out data slices that we need to loop over.
+ # For every 2D slice in the data image cube (assuming other axes besides x/y), we need to apply a
+ # convolution to the corresponding model slice, and add it in to the data slice. The complication
+ # is that any extra axis may be of length 1 in the model and of length N in the data (e.g. frequency axis),
+ # in which case we need to add the same model slice to all N data slices. The loop below puts together a series
+ # of index tuples representing each per-slice operation.
+ # These two initial slices correspond to the x/y axes. Additional indices will be appended to these in a loop
+ slices0 = [([data_x_slice, data_y_slice], [slice(None), slice(None)])]
+ # work out Stokes axis
+ sd0 = [data_x_slice, data_y_slice]
+ sm0 = [slice(None), slice(None)]
+ slices = []
+ slices = [(sd0 + [dst], sm0 + [mst]) for dst, mst in enumerate(model_stp) if mst >= 0]
+ # for dst,mst in enumerate(model_stp):
+ # if mst >= 0:
+ # slices = [ (sd0+[dst],sm0+[mst]) for sd0,sm0 in slices ]
+ # now loop over extra axes
+ for axis in range(3, len(extra_data_axes) + 3):
+ # list of data image indices to iterate over for this axis, 0...N-1
+ indices = [[x] for x in range(data.shape[axis])]
+ # list of model image indices to iterate over
+ if model.shape[axis] == 1:
+ model_indices = [[0]] * len(indices)
+ # shape-n: must be same as data, in which case 0..N-1 is assigned to 0..N-1
+ elif model.shape[axis] == data.shape[axis]:
+ model_indices = indices
+ # else error
+ else:
+ raise RuntimeError, "axis %s of model image %s doesn't match that of output image" % \
+ (extra_data_axes[axis - 3], src.shape.filename)
+ # update list of slices
+ slices = [(sd0 + sd, si0 + si) for sd0, si0 in slices for sd, si in zip(indices, model_indices)]
+ # now loop over slices and assign
+ for sd, si in slices:
+ conv = convolve(model[tuple(si)], conv_kernel)
+ data[tuple(sd)] += model_resampler(conv)
+ ## for debugging these are handy:
+ # data[0:conv.shape[0],0:conv.shape[1],0,0] = conv
+ # data[0:conv_kernel.shape[0],-conv_kernel.shape[1]:,0,0] = conv_kernel
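
The "indexer" trick documented inside restoreSources() above is easy to demonstrate on its own: per-axis vectors are padded with numpy.newaxis so that a single broadcast expression expands to the full dimensionality of the data cube. A numpy-only sketch, with axis sizes chosen arbitrarily:

import numpy

naxis = 4   # pretend the cube is nx x ny x nstokes x nfreq

def make_axis_indexer(n, elem_index=numpy.newaxis):
    indexer = [elem_index] * naxis
    indexer[n] = slice(None)
    return tuple(indexer)

x_indexer = make_axis_indexer(0)
y_indexer = make_axis_indexer(1)
stokes_indexer = make_axis_indexer(2)

vx = numpy.arange(3, dtype=float)      # 3 pixels along x
vy = numpy.arange(4, dtype=float)      # 4 pixels along y
stokes_vec = numpy.array([1.0, 0.1])   # say, I and Q fluxes

gg = numpy.exp(-(vx[x_indexer] ** 2 + vy[y_indexer] ** 2) / 2.) * stokes_vec[stokes_indexer]
print(gg.shape)   # (3, 4, 2, 1) -- ready to be added into data[i1:i2, j1:j2, ...]
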
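
The uv-plane algebra in convolveGaussian() above can also be sanity-checked in isolation. The function is restated below (with a parenthesised tuple in the comprehension); convolving a circular Gaussian of sigma 1 with itself should give sigma sqrt(2) on both axes.

import math

def convolveGaussian(x1, y1, p1, x2, y2, p2):
    """Convolves Gaussian (x1,y1,p1) with (x2,y2,p2); extents are sigmas, angles in radians."""
    u1, v1, u2, v2 = [(math.pi ** 2) * 2 * a ** 2 for a in (x1, y1, x2, y2)]
    c1, s1 = math.cos(p1), math.sin(p1)
    c2, s2 = math.cos(p2), math.sin(p2)
    a = u1 * c1 ** 2 + v1 * s1 ** 2 + u2 * c2 ** 2 + v2 * s2 ** 2
    c = u1 * s1 ** 2 + v1 * c1 ** 2 + u2 * s2 ** 2 + v2 * c2 ** 2
    b = c1 * s1 * (u1 - v1) + c2 * s2 * (u2 - v2)
    D = math.sqrt((a - c) ** 2 + 4 * b ** 2)
    E = a + c
    a1 = math.sqrt(2 / (E - D))
    b1 = math.sqrt(2 / (E + D))
    if b:
        p = math.atan2(2 * b, a - c) / 2 + math.pi / 2
    else:
        p = 0 if a <= c else math.pi / 2
    x = math.sqrt(1 / (2 * math.pi ** 2 * a1 ** 2))
    y = math.sqrt(1 / (2 * math.pi ** 2 * b1 ** 2))
    return y, x, (p - math.pi / 2) % math.pi

maj, mino, pa = convolveGaussian(1.0, 1.0, 0.0, 1.0, 1.0, 0.0)
print(round(maj, 6), round(mino, 6))   # 1.414214 1.414214, i.e. sqrt(2)
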
diff --git a/Tigger/Tools/__init__.py b/Tigger/Tools/__init__.py
index 52e8a1c..363ecc6 100644
--- a/Tigger/Tools/__init__.py
+++ b/Tigger/Tools/__init__.py
@@ -1,5 +1,5 @@
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
diff --git a/Tigger/Tools/gaussfitter2.py b/Tigger/Tools/gaussfitter2.py
index ac02aba..93a4d1b 100644
--- a/Tigger/Tools/gaussfitter2.py
+++ b/Tigger/Tools/gaussfitter2.py
@@ -2,7 +2,7 @@
# gaussfitter.py
# created by Adam Ginsburg (adam.ginsburg@colorado.edu or keflavich@gmail.com) 3/17/08)
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -30,33 +30,35 @@
from scipy import optimize
from scipy import stats
-def moments (data,circle,rotate,vheight):
+
+def moments(data, circle, rotate, vheight):
"""Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
the gaussian parameters of a 2D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above"""
total = data.sum()
X, Y = np.indices(data.shape)
- x = (X*data).sum()/total
- y = (Y*data).sum()/total
+ x = (X * data).sum() / total
+ y = (Y * data).sum() / total
col = data[:, int(y)]
- width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())
+ width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
row = data[int(x), :]
- width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
- width = ( width_x + width_y ) / 2.
- height = stats.mode(data.ravel())[0][0] if vheight else 0;
- amplitude = data.max()-height
- mylist = [amplitude,x,y]
- if vheight==1:
+ width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
+ width = (width_x + width_y) / 2.
+ height = stats.mode(data.ravel())[0][0] if vheight else 0
+ amplitude = data.max() - height
+ mylist = [amplitude, x, y]
+ if vheight == 1:
mylist = [height] + mylist
- if circle==0:
- mylist = mylist + [width_x,width_y]
+ if circle == 0:
+ mylist = mylist + [width_x, width_y]
else:
mylist = mylist + [width]
- if rotate==1:
- mylist = mylist + [0.] #rotation "moment" is just zero...
+ if rotate == 1:
+ mylist = mylist + [0.] # rotation "moment" is just zero...
return tuple(mylist)
+
def twodgaussian(inpars, circle, rotate, vheight):
"""Returns a 2d gaussian function of the form:
x' = cos(rota) * x - sin(rota) * y
@@ -86,7 +88,7 @@ def twodgaussian(inpars, circle, rotate, vheight):
height = float(height)
else:
height = float(0)
- amplitude, center_x, center_y = inpars.pop(0),inpars.pop(0),inpars.pop(0)
+ amplitude, center_x, center_y = inpars.pop(0), inpars.pop(0), inpars.pop(0)
amplitude = float(amplitude)
center_x = float(center_x)
center_y = float(center_y)
@@ -95,12 +97,12 @@ def twodgaussian(inpars, circle, rotate, vheight):
width_x = float(width)
width_y = float(width)
else:
- width_x, width_y = inpars.pop(0),inpars.pop(0)
+ width_x, width_y = inpars.pop(0), inpars.pop(0)
width_x = float(width_x)
width_y = float(width_y)
if rotate == 1:
rota = inpars.pop(0)
- rota = np.pi/180. * float(rota)
+ rota = np.pi / 180. * float(rota)
rcen_x = center_x * np.cos(rota) - center_y * np.sin(rota)
rcen_y = center_x * np.sin(rota) + center_y * np.cos(rota)
else:
@@ -108,22 +110,25 @@ def twodgaussian(inpars, circle, rotate, vheight):
rcen_y = center_y
if len(inpars) > 0:
raise ValueError("There are still input parameters:" + str(inpars) + \
- " and you've input: " + str(inpars_old) + " circle=%d, rotate=%d, vheight=%d" % (circle,rotate,vheight) )
-
- def rotgauss(x,y):
- if rotate==1:
+ " and you've input: " + str(inpars_old) + " circle=%d, rotate=%d, vheight=%d" % (
+ circle, rotate, vheight))
+
+ def rotgauss(x, y):
+ if rotate == 1:
xp = x * np.cos(rota) - y * np.sin(rota)
yp = x * np.sin(rota) + y * np.cos(rota)
else:
xp = x
yp = y
- g = height+amplitude*np.exp(
- -(((rcen_x-xp)/width_x)**2+
- ((rcen_y-yp)/width_y)**2)/2.)
+ g = height + amplitude * np.exp(
+ -(((rcen_x - xp) / width_x) ** 2 +
+ ((rcen_y - yp) / width_y) ** 2) / 2.)
return g
+
return rotgauss
-def gaussfit(data,err=None,params=[],autoderiv=1,return_all=0,circle=0,rotate=1,vheight=1):
+
+def gaussfit(data, err=None, params=[], autoderiv=1, return_all=0, circle=0, rotate=1, vheight=1):
"""
Gaussian fitter with the ability to fit a variety of different forms of 2-dimensional gaussian.
@@ -154,11 +159,12 @@ def gaussfit(data,err=None,params=[],autoderiv=1,return_all=0,circle=0,rotate=1,
Warning: Does NOT necessarily output a rotation angle between 0 and 360 degrees.
"""
if params == []:
- params = (moments(data,circle,rotate,vheight))
+ params = (moments(data, circle, rotate, vheight))
if err == None:
- errorfunction = lambda p: np.ravel((twodgaussian(p,circle,rotate,vheight)(*np.indices(data.shape)) - data))
+ errorfunction = lambda p: np.ravel((twodgaussian(p, circle, rotate, vheight)(*np.indices(data.shape)) - data))
else:
- errorfunction = lambda p: np.ravel((twodgaussian(p,circle,rotate,vheight)(*np.indices(data.shape)) - data)/err)
+ errorfunction = lambda p: np.ravel(
+ (twodgaussian(p, circle, rotate, vheight)(*np.indices(data.shape)) - data) / err)
if autoderiv == 0:
# the analytic derivative, while not terribly difficult, is less efficient and useful. I only bothered
# putting it here because I was instructed to do so for a class project - please ask if you would like
@@ -166,7 +172,7 @@ def gaussfit(data,err=None,params=[],autoderiv=1,return_all=0,circle=0,rotate=1,
raise ValueError("I'm sorry, I haven't implemented this feature yet.")
else:
p, cov, infodict, errmsg, success = optimize.leastsq(errorfunction, params, full_output=1)
- if return_all == 0:
+ if return_all == 0:
return p
elif return_all == 1:
- return p,cov,infodict,errmsg
+ return p, cov, infodict, errmsg
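
The moment-based seeding that gaussfit() above falls back to when no initial parameters are given can be illustrated without scipy: for a clean, centred Gaussian the estimates land close to the true amplitude, centre and widths. The sketch below mirrors the moments() recipe for the circle=0, rotate=0, vheight=0 case; note that, like moments() above, each width estimate subtracts the other axis's centroid, which is harmless for the on-diagonal centre used here.

import numpy as np

def moment_estimate(data):
    """(amplitude, x, y, width_x, width_y) via image moments, mirroring moments() above."""
    total = data.sum()
    X, Y = np.indices(data.shape)
    x = (X * data).sum() / total
    y = (Y * data).sum() / total
    col = data[:, int(y)]
    width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
    row = data[int(x), :]
    width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
    return data.max(), x, y, width_x, width_y

# synthetic circular Gaussian: amplitude 1, centre (20, 20), sigma 3 pixels
X, Y = np.indices((50, 50))
img = np.exp(-((X - 20) ** 2 + (Y - 20) ** 2) / (2 * 3.0 ** 2))
print([round(float(v), 2) for v in moment_estimate(img)])   # roughly [1.0, 20.0, 20.0, 3.0, 3.0]
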
diff --git a/Tigger/__init__.py b/Tigger/__init__.py
index d4875dc..6b827af 100644
--- a/Tigger/__init__.py
+++ b/Tigger/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,9 +26,10 @@
import sys
-from Tigger.Models.Formats import load, save, listFormats
import Kittens.config
+from Tigger.Models.Formats import load, save, listFormats
+
__version__ = "1.4.2"
release_string = __version__
@@ -37,32 +38,34 @@
matplotlib_nuked = False
-startup_dprint = startup_dprintf = lambda *dum:None
+startup_dprint = startup_dprintf = lambda *dum: None
_verbosity = Kittens.utils.verbosity(name="tigger")
dprint = _verbosity.dprint
dprintf = _verbosity.dprintf
-def import_pyfits ():
- # leaving this here for backwards compatibility
- from astropy.io import fits as pyfits
- return pyfits
+def import_pyfits():
+ # leaving this here for backwards compatibility
+ from astropy.io import fits as pyfits
+ return pyfits
+
+
+def nuke_matplotlib():
+ """Some people think nothing of importing matplotlib at every opportunity, with no regard
+ to consequences. Tragically, some of these people also write Python code, and some of them
+ are responsible for astLib. Seriously man, if I just want to pull in WCS support, why the fuck
+ do I need the monstrous entirety of matplotlib to come along with it, especially since it
+ kills things like Qt outright?
+ This function prevents such perversities from happening, by inserting dummy modules
+ into the sys.modules dict. Call nuke_matplotlib() once, and all further attempts to
+ import matplotlib by any other code will be cheerfully ignored.
+ """
+ global matplotlib_nuked
+ if 'pylab' not in sys.modules:
+ # replace the modules referenced by astLib by dummy_module objects, which return a dummy callable for every attribute
+ class dummy_module(object):
+ def __getattr__(self, name):
+ return 'nowhere' if name == '__file__' else (lambda *args, **kw: True)
-def nuke_matplotlib ():
- """Some people think nothing of importing matplotlib at every opportunity, with no regard
- to consequences. Tragically, some of these people also write Python code, and some of them
- are responsible for astLib. Seriously man, if I just want to pull in WCS support, why the fuck
- do I need the monstrous entirety of matplotlib to come along with it, especially since it
- kills things like Qt outright?
- This function prevents such perversitities from happening, by inserting dummy modules
- into the sys.modules dict. Call nuke_matplotlib() once, and all further attempts to
- import matplotlib by any other code will be cheerfully ignored.
- """
- global matplotlib_nuked
- if 'pylab' not in sys.modules:
- # replace the modules referenced by astLib by dummy_module objects, which return a dummy callable for every attribute
- class dummy_module (object):
- def __getattr__ (self,name):
- return 'nowhere' if name == '__file__' else (lambda *args,**kw:True)
- sys.modules['pylab'] = sys.modules['matplotlib'] = sys.modules['matplotlib.patches'] = dummy_module()
- matplotlib_nuked = True
+ sys.modules['pylab'] = sys.modules['matplotlib'] = sys.modules['matplotlib.patches'] = dummy_module()
+ matplotlib_nuked = True
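
The sys.modules trick used by nuke_matplotlib() above generalises: once a stub object is registered under a package's name, later imports of that name resolve to the stub without ever touching the real package. A minimal demonstration, using a made-up package name ('some_heavy_package'); nuke_matplotlib() does the same for 'pylab', 'matplotlib' and 'matplotlib.patches'.

import sys

class dummy_module(object):
    def __getattr__(self, name):
        return 'nowhere' if name == '__file__' else (lambda *args, **kw: True)

sys.modules['some_heavy_package'] = dummy_module()

import some_heavy_package             # resolves to the stub already in sys.modules
print(some_heavy_package.__file__)    # 'nowhere'
print(some_heavy_package.plot(1, 2))  # True -- every attribute is a cheerful no-op
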
diff --git a/Tigger/bin/tigger-convert b/Tigger/bin/tigger-convert
index 94601f5..51fb58e 100755
--- a/Tigger/bin/tigger-convert
+++ b/Tigger/bin/tigger-convert
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,1014 +26,1050 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys
-from astropy.io import fits as pyfits
-import re
-import os.path
import glob
import math
-import numpy
+import re
+import sys
import traceback
-DEG = math.pi/180;
+import numpy
+import os.path
-NATIVE = "Tigger";
+DEG = math.pi / 180
+
+NATIVE = "Tigger"
+
+
+def Jones2Mueller_circular(J):
+ S = numpy.matrix([[1, 0, 0, 1], [0, 1, 1j, 0], [0, 1, -1j, 0], [1, 0, 0, -1]])
+ # Compute the Mueller matrix
+ MM = (S.I) * numpy.kron(J, J.H) * S
+ return numpy.real(MM)
+
+
+def Jones2Mueller_linear(J):
+ S = numpy.matrix([[1, 1, 0, 0], [0, 0, 1, 1j], [0, 0, 1, -1j], [1, -1, 0, 0]])
+ # Compute the Mueller matrix
+ MM = (S.I) * numpy.kron(J, J.H) * S
+ return numpy.real(MM)
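+
+
+# Quick sanity check for the Jones-to-Mueller conversion (illustrative only): an identity
+# Jones matrix maps to the 4x4 identity Mueller matrix in either basis, since
+# S.I * numpy.kron(I, I.H) * S == I.
+#   Jones2Mueller_linear(numpy.matrix(numpy.eye(2)))    # -> 4x4 identity
+#   Jones2Mueller_circular(numpy.matrix(numpy.eye(2)))  # -> 4x4 identity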
-def Jones2Mueller_circular (J):
- S = numpy.matrix([[1,0,0,1],[0,1,1j,0],[0,1,-1j,0],[1,0,0,-1]])
- # Compute the Mueller matrix
- MM = (S.I) * numpy.kron(J, J.H) * S
- return numpy.real(MM)
-def Jones2Mueller_linear (J):
- S = numpy.matrix([[1,1,0,0],[0,0,1,1j],[0,0,1,-1j],[1,-1,0,0]])
- # Compute the Mueller matrix
- MM = (S.I) * numpy.kron(J, J.H) * S
- return numpy.real(MM)
-
## Griffin's old version, for linear. Possibly the order is wrong
# A=numpy.matrix([[1,0,0,1],[1,0,0,-1],[0,1,1,0],[0,1j,-1j,0]])
# M=A*numpy.kron(J,J.conj())*numpy.linalg.inv(A)
# return numpy.real(M)
-def arc2lm(l0,m0,arclen=2.*numpy.pi,nsteps=360):
- """Return cartesian positions that sample an arc of a circle (similar to numpy.linspace)
- l0,m0: initial cartesian position to determine radius and starting point
- arclen: angle, in radians, to sample, value should be between 0 and 2pi
- nsteps: number of samples"""
- r=numpy.sqrt(float(l0)**2.+float(m0)**2.)
- angle=numpy.arctan2(m0,l0)
- da=numpy.linspace(0.,arclen,num=nsteps)
- l=r*numpy.cos(angle+da)
- m=r*numpy.sin(angle+da)
- return l,m
-
-def rotatelm (l0,m0,rotangle):
- """Rotate (l0,m0) to a new (l,m) based on angle"""
- r = numpy.sqrt(float(l0)**2.+float(m0)**2.)
- angle = numpy.arctan2(m0,l0)
- l = r*numpy.cos(angle+rotangle)
- m = r*numpy.sin(angle+rotangle)
- return l,m
+def arc2lm(l0, m0, arclen=2. * numpy.pi, nsteps=360):
+ """Return cartesian positions that sample an arc of a circle (similar to numpy.linspace)
+ l0,m0: initial cartesian position to determine radius and starting point
+ arclen: angle, in radians, to sample, value should be between 0 and 2pi
+ nsteps: number of samples"""
+ r = numpy.sqrt(float(l0) ** 2. + float(m0) ** 2.)
+ angle = numpy.arctan2(m0, l0)
+ da = numpy.linspace(0., arclen, num=nsteps)
+ l = r * numpy.cos(angle + da)
+ m = r * numpy.sin(angle + da)
+ return l, m
+
+
+def rotatelm(l0, m0, rotangle):
+ """Rotate (l0,m0) to a new (l,m) based on angle"""
+ r = numpy.sqrt(float(l0) ** 2. + float(m0) ** 2.)
+ angle = numpy.arctan2(m0, l0)
+ l = r * numpy.cos(angle + rotangle)
+ m = r * numpy.sin(angle + rotangle)
+ return l, m
+
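+# Worked example (illustrative): rotatelm(1.0, 0.0, numpy.pi / 2) rotates the point
+# (l, m) = (1, 0) by 90 degrees, giving approximately (0, 1), while
+# arc2lm(1.0, 0.0, arclen=numpy.pi, nsteps=3) samples the unit circle at roughly
+# (1, 0), (0, 1) and (-1, 0).
+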
if __name__ == '__main__':
- import Kittens.utils
- from Kittens.utils import curry
- _verbosity = Kittens.utils.verbosity(name="convert-model");
- dprint = _verbosity.dprint;
- dprintf = _verbosity.dprintf;
-
- # find Tigger
- try:
- import Tigger
- except ImportError:
- dirname = os.path.dirname(os.path.realpath(__file__));
- # go up the directory tree looking for directory "Tigger"
- while len(dirname) > 1:
- if os.path.basename(dirname) == "Tigger":
- break;
- dirname = os.path.dirname(dirname);
- else:
- print "Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH."%os.path.realpath(__file__);
- sys.exit(1);
- sys.path.append(os.path.dirname(dirname));
+ import Kittens.utils
+
+ _verbosity = Kittens.utils.verbosity(name="convert-model")
+ dprint = _verbosity.dprint
+ dprintf = _verbosity.dprintf
+
+ # find Tigger
try:
- import Tigger
- except:
- print "Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH."%dirname;
- sys.exit(1);
-
- # some things can implicitly invoke matplotlib, which can cry when no X11 is around
- # so to make sure thingfs work in pipelines, we explicitly disable this here, unless we're asked for plots
- if not "--enable-plots" in sys.argv:
- Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
-
- from Tigger import Coordinates
- import Tigger.Models.Formats
- import Tigger.Models.ModelClasses
- AUTO = "auto";
- full_formats = Tigger.Models.Formats.listFormatsFull();
- input_formats = [ name for name,(load,save,doc,extensions) in full_formats if load ] + [ AUTO ];
- output_formats = [ name for name,(load,save,doc,extensions) in full_formats if save ] + [ AUTO ];
-
- from Tigger.Models.Formats import ASCII
-
- # setup some standard command-line option parsing
- #
- from optparse import OptionParser,OptionGroup
- parser = OptionParser(usage="""%prog: sky_model [output_model]""",
- description="""Converts sky models into Tigger format and/or applies various processing options.
+ import Tigger
+ except ImportError:
+ dirname = os.path.dirname(os.path.realpath(__file__))
+ # go up the directory tree looking for directory "Tigger"
+ while len(dirname) > 1:
+ if os.path.basename(dirname) == "Tigger":
+ break
+ dirname = os.path.dirname(dirname)
+ else:
+ print "Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH." % os.path.realpath(
+ __file__)
+ sys.exit(1)
+ sys.path.append(os.path.dirname(dirname))
+ try:
+ import Tigger
+ except:
+ print "Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH." % dirname
+ sys.exit(1)
+
+ # some things can implicitly invoke matplotlib, which can cry when no X11 is around
+    # so to make sure things work in pipelines, we explicitly disable this here, unless we're asked for plots
+ if not "--enable-plots" in sys.argv:
+        Tigger.nuke_matplotlib()  # don't let the door hit you in the ass, sucka
+
+ from Tigger import Coordinates
+ import Tigger.Models.Formats
+ import Tigger.Models.ModelClasses
+
+ AUTO = "auto"
+ full_formats = Tigger.Models.Formats.listFormatsFull()
+ input_formats = [name for name, (load, save, doc, extensions) in full_formats if load] + [AUTO]
+ output_formats = [name for name, (load, save, doc, extensions) in full_formats if save] + [AUTO]
+
+ from Tigger.Models.Formats import ASCII
+
+ # setup some standard command-line option parsing
+ #
+ from optparse import OptionParser, OptionGroup
+
+ parser = OptionParser(usage="""%prog: sky_model [output_model]""",
+ description="""Converts sky models into Tigger format and/or applies various processing options.
Input 'sky_model' may be any model format importable by Tigger, recognized by extension, or explicitly specified via an option switch.
'output_model' is always a native Tigger model. If an output model is not specfied, the conversion is done in-place if the input model
is a Tigger model (-f switch must be specified to allow overwriting), or else a new filename is generated.""")
- group = OptionGroup(parser,"Input/output and conversion options")
- parser.add_option_group(group);
- group.add_option("-f","--force",action="store_true",
- help="Forces overwrite of output model.");
- group.add_option("-t","--type",choices=input_formats,
- help="Input model type (%s). Default is %%default."%(", ".join(input_formats)));
- group.add_option("-o","--output-type",choices=output_formats,metavar="TYPE",
- help="Output model type (%s). Default is %%default."%(", ".join(output_formats)));
- group.add_option("-a","--append",metavar="FILENAME",action="append",
- help="Append another model to input model. May be given multiple times.");
- group.add_option("--append-type",choices=input_formats,metavar="TYPE",
- help="Appended model type (%s). Default is %%default."%(", ".join(input_formats)));
- group.add_option("--format",type="string",
- help="""Input format, for ASCII or BBS tables. For ASCII tables, default is "%s". For BBS tables, the default format is specified in the file header."""%ASCII.DefaultDMSFormatString);
- group.add_option("--append-format",type="string",default="",
- help="""Format of appended file, for ASCII or BBS tables. Default is to use --format.""");
- group.add_option("--output-format",type="string",metavar="FORMAT",
- help="""Output format, for ASCII or BBS tables. If the model was originally imported from an ASCII or BBS table, the default output format will be the same as the original format.""");
- group.add_option("--help-format",action="store_true",
- help="Prints help on format strings.");
- group.add_option("--min-extent",type="float",metavar="ARCSEC",
- help="Minimal source extent, when importing NEWSTAR or ASCII files. Sources with a smaller extent will be treated as point sources. Default is %default.");
-
- group = OptionGroup(parser,"Options to select a subset of the input")
- parser.add_option_group(group);
- group.add_option("-T","--tags",type="string",action="append",metavar="TAG",
- help="Extract sources with the specified tags.");
- group.add_option("--select",type="string",metavar='TAG<>VALUE',action="append",
- help="Selects a subset of sources by comparing the named TAG to a float VALUE. '<>' "+
- "represents the comparison operator, and can be one of == (or =),!=,<=,<,>,>=. Alternatively, "+
- "you may use the FORTRAN-style operators .eq.,.ne.,.le.,.lt.,.gt.,.ge. Multiple " +
- "select options may be given, in which case the effect is a logical-AND. Note that VALUE may be "
- "followed by one of the characters d, m or s, in which case it will be converted from degrees, "
- "minutes or seconds into radians. This is useful for selections such as \"r<5d\".");
- group.add_option("--remove-nans",action="store_true",
- help="Removes the named source(s) from the model. NAME may contain * and ? wildcards.");
-
- group = OptionGroup(parser,"Options to manipulate fluxes etc.")
- parser.add_option_group(group);
- group.add_option("--app-to-int",action="store_true",
- help="Treat fluxes as apparent, and rescale them into intrinsic using the "+
- "supplied primary beam model (see --primary-beam option).");
- group.add_option("--int-to-app",action="store_true",
- help="Treat fluxes as intrinsic, and rescale them into apparent using the "+
- "supplied primary beam model (see --primary-beam option).");
- group.add_option("--newstar-app-to-int",action="store_true",
- help="Convert NEWSTAR apparent fluxes in input model to intrinsic. Only works for NEWSTAR or NEWSTAR-derived input models.");
- group.add_option("--newstar-int-to-app",action="store_true",
- help="Convert NEWSTAR intrinsic fluxes in input model to apparent. Only works for NEWSTAR or NEWSTAR-derived input models.");
- group.add_option("--center",type="string",metavar='COORDINATES',
- help="Override coordinates of the nominal field center specified in the input model. Use the form "+
- "\"Xdeg,Ydeg\" or \"Xdeg,Yrad\" to specify RA,Dec in degrees or radians, or else a "+
- "a pyrap.measures direction string of the form "+\
- "REF,C1,C2, for example \"j2000,1h5m0.2s,+30d14m15s\". See the pyrap.measures documentation for more details.");
- group.add_option("--refresh-r",action="store_true",
- help="Recompute the 'r' (radial distance from center) attribute of each source based on the current field center.");
- group.add_option("--ref-freq",type="float",metavar="MHz",
- help="Set or change the reference frequency of the model.");
-
- group = OptionGroup(parser,"Primary beam-related options")
- parser.add_option_group(group)
- group.add_option("--primary-beam",type="string",metavar="EXPR",
- help="""Apply a primary beam expression to estimate apparent fluxes. Any valid Python expression using the variables 'r' and 'fq' is accepted. Use "refresh" to re-estimate fluxes using the current expression.
+ group = OptionGroup(parser, "Input/output and conversion options")
+ parser.add_option_group(group)
+ group.add_option("-f", "--force", action="store_true",
+ help="Forces overwrite of output model.")
+ group.add_option("-t", "--type", choices=input_formats,
+ help="Input model type (%s). Default is %%default." % (", ".join(input_formats)))
+ group.add_option("-o", "--output-type", choices=output_formats, metavar="TYPE",
+ help="Output model type (%s). Default is %%default." % (", ".join(output_formats)))
+ group.add_option("-a", "--append", metavar="FILENAME", action="append",
+ help="Append another model to input model. May be given multiple times.")
+ group.add_option("--append-type", choices=input_formats, metavar="TYPE",
+ help="Appended model type (%s). Default is %%default." % (", ".join(input_formats)))
+ group.add_option("--format", type="string",
+ help="""Input format, for ASCII or BBS tables. For ASCII tables, default is "%s". For BBS tables, the default format is specified in the file header.""" % ASCII.DefaultDMSFormatString)
+ group.add_option("--append-format", type="string", default="",
+ help="""Format of appended file, for ASCII or BBS tables. Default is to use --format.""")
+ group.add_option("--output-format", type="string", metavar="FORMAT",
+ help="""Output format, for ASCII or BBS tables. If the model was originally imported from an ASCII or BBS table, the default output format will be the same as the original format.""")
+ group.add_option("--help-format", action="store_true",
+ help="Prints help on format strings.")
+ group.add_option("--min-extent", type="float", metavar="ARCSEC",
+ help="Minimal source extent, when importing NEWSTAR or ASCII files. Sources with a smaller extent will be treated as point sources. Default is %default.")
+
+ group = OptionGroup(parser, "Options to select a subset of the input")
+ parser.add_option_group(group)
+ group.add_option("-T", "--tags", type="string", action="append", metavar="TAG",
+ help="Extract sources with the specified tags.")
+ group.add_option("--select", type="string", metavar='TAG<>VALUE', action="append",
+ help="Selects a subset of sources by comparing the named TAG to a float VALUE. '<>' " +
+ "represents the comparison operator, and can be one of == (or =),!=,<=,<,>,>=. Alternatively, " +
+ "you may use the FORTRAN-style operators .eq.,.ne.,.le.,.lt.,.gt.,.ge. Multiple " +
+ "select options may be given, in which case the effect is a logical-AND. Note that VALUE may be "
+ "followed by one of the characters d, m or s, in which case it will be converted from degrees, "
+ "minutes or seconds into radians. This is useful for selections such as \"r<5d\".")
+ group.add_option("--remove-nans", action="store_true",
+ help="Removes the named source(s) from the model. NAME may contain * and ? wildcards.")
+
+ group = OptionGroup(parser, "Options to manipulate fluxes etc.")
+ parser.add_option_group(group)
+ group.add_option("--app-to-int", action="store_true",
+ help="Treat fluxes as apparent, and rescale them into intrinsic using the " +
+ "supplied primary beam model (see --primary-beam option).")
+ group.add_option("--int-to-app", action="store_true",
+ help="Treat fluxes as intrinsic, and rescale them into apparent using the " +
+ "supplied primary beam model (see --primary-beam option).")
+ group.add_option("--newstar-app-to-int", action="store_true",
+ help="Convert NEWSTAR apparent fluxes in input model to intrinsic. Only works for NEWSTAR or NEWSTAR-derived input models.")
+ group.add_option("--newstar-int-to-app", action="store_true",
+ help="Convert NEWSTAR intrinsic fluxes in input model to apparent. Only works for NEWSTAR or NEWSTAR-derived input models.")
+ group.add_option("--center", type="string", metavar='COORDINATES',
+ help="Override coordinates of the nominal field center specified in the input model. Use the form " +
+ "\"Xdeg,Ydeg\" or \"Xdeg,Yrad\" to specify RA,Dec in degrees or radians, or else a " +
+                              "pyrap.measures direction string of the form " + \
+ "REF,C1,C2, for example \"j2000,1h5m0.2s,+30d14m15s\". See the pyrap.measures documentation for more details.")
+ group.add_option("--refresh-r", action="store_true",
+ help="Recompute the 'r' (radial distance from center) attribute of each source based on the current field center.")
+ group.add_option("--ref-freq", type="float", metavar="MHz",
+ help="Set or change the reference frequency of the model.")
+
+ group = OptionGroup(parser, "Primary beam-related options")
+ parser.add_option_group(group)
+ group.add_option("--primary-beam", type="string", metavar="EXPR",
+ help="""Apply a primary beam expression to estimate apparent fluxes. Any valid Python expression using the variables 'r' and 'fq' is accepted. Use "refresh" to re-estimate fluxes using the current expression.
Example (for the WSRT-like 25m dish PB): "cos(min(65*fq*1e-9*r,1.0881))**6".
- OR: give a set of FITS primary beam patterns of the form e.g. FILENAME_$(xy)_$(reim).fits, these are the same FITS files used in MeqTrees pybeams_fits.""");
- group.add_option("--linear-pol",action="store_true",
- help="Use XY basis correlations for beam filenames and Mueller matrices. Default is RL.")
- group.add_option("--fits-l-axis",type="string",default="-X",
- help="CTYPE for L axis in the FITS PB file. Note that our internal L points East (increasing RA), if the "
- "FITS beam axis points the opposite way, prefix the CTYPE with a '-'' character.")
- group.add_option("--fits-m-axis",type="string",default="Y",
- help="CTYPE for M axis in the FITS PB file. Note that our internal M points North (increasing Dec), if the "
- "FITS beam axis points the opposite way, prefix the CTYPE with a '-'' character.")
- group.add_option("--beam-freq",type="float",metavar="MHz",
- help="use given frequency for primary beam model, rather than the model reference frequency");
- group.add_option("--beam-clip",type="float",metavar="GAIN",default=0.001,
- help="when using a FITS beam, clip (power) beam gains at this level to keep intrinsic source fluxes from blowing up. Sources below this beamgain will be tagged 'nobeam'. Default: %default");
- group.add_option("--beam-spi",type="float",metavar="MHz",
- help="perform a spectral index fit to each source based on a frequency dependent FITS beam, requires --primary-beam option to be used with a FITS file. "+
- "Apply this spectral index to LSM sources. "+
- "Must supply a band width (centred on --beam-freq) over which the beam spi is estimated");
- group.add_option("--force-beam-spi-wo-spectrum",action="store_true",
- help="apply beam-derived spectral indices even to sources without an intrinsic spectrum. Default "+
- "is to only apply to sources that already have a spectrum."
- );
- group.add_option("--beam-nopol",action="store_true",
- help="apply intensity beam model only, ignoring polarization. Default is to use polarization."
- );
- group.add_option("--beam-diag",action="store_true",
- help="use diagonal Jones terms only for beam model. Default is to use all four terms if available."
- );
- group.add_option("--pa",type="float",default=None,
- help="Rotate the primary beam pattern through a parallactic angle (in degrees).");
- group.add_option("--pa-range",type="str",default=None,metavar="FROM,TO",
- help="Rotate the primary beam pattern through a range of parallactic angles (in degrees) and use the average value over PA.");
- group.add_option("--pa-from-ms",type="str",default=None,metavar="MS1[:FIELD1],MS2:[FIELD2],...",
- help="Rotate the primary beam pattern through a range of parallactic angles as given by the MS and field ID (default 0), "+
- "and take the average over time. This is more accurate than --pa-range.");
- group.add_option("--beam-average-jones",action="store_true",
- help="Correct approach to rotational averaging is to convert Jones(PA) to Mueller(PA), then average "+
- "over PA. Tigger versions<=1.3.3 took the incorrect approach of averaging Jones over PA, then converting "+
- "to Mueller. Use this option to mimic the old approach.");
-
- group = OptionGroup(parser,"Options to cluster and rename sources")
- parser.add_option_group(group);
- group.add_option("--cluster-dist",type="float",metavar="ARCSEC",
- help="Distance parameter for source clustering, 0 to disable. Default is %default.");
- group.add_option("--rename",action="store_true",
- help="Rename sources according to the COPART (cluster ordering, P.A., radial distance, type) scheme");
- group.add_option("--radial-step",type="float",metavar="ARCMIN",
- help="Size of one step in radial distance for the COPART scheme. Default is %default'.");
- group.add_option("--merge-clusters",type="string",metavar="TAG(S)",
- help="Merge source clusters bearing the specified tags, replacing them with a "+ "single point source. Multiple tags may be given separated by commas. "+
- "Use 'ALL' to merge all clusters.");
- group.add_option("--prefix",type="string",
- help="Prefix all source names with the given string");
-
-
-
- group = OptionGroup(parser,"Other model manipulation options")
- parser.add_option_group(group);
- group.add_option("--remove-source",type="string",action="append",
- metavar="NAME",
- help="Removes the named source(s) from the model. NAME may contain * and ? wildcards.");
- group.add_option("--add-brick",type="string",action="append",
- metavar="NAME:FILE[:PAD_FACTOR:[TAGS:...]]",
- help="Adds a uv-brick to the model. NAME is a source name, FILE is a "+
- "FITS file, PAD_FACTOR is set to 1 if not specified. TAGS is a list of boolean flags.");
- group.add_option("--recenter",type="string",metavar='COORDINATES',
- help="Shift the sky model from the nominal center to a different field center. COORDINATES specified as per the --center option.");
-
- group = OptionGroup(parser,"Debugging and verbosity options")
- parser.add_option_group(group);
- group.add_option("-v", "--verbose",action="count",
- help="increases verbosity.");
- group.add_option("-d", "--debug",dest="debug",type="string",action="append",metavar="Context=Level",
- help="(for debugging Python code) sets verbosity level of the named Python context. May be used multiple times.");
- group.add_option("--enable-plots",action="store_true",
- help="enables various diagnostic plots");
-
- parser.set_defaults(cluster_dist=60,min_extent=0,format=None,type='auto',output_type='auto',radial_step=10,ref_freq=-1);
-
- (options,rem_args) = parser.parse_args();
- min_extent = (options.min_extent/3600)*DEG;
-
- if options.help_format:
- print ASCII.FormatHelp;
- sys.exit(0);
-
- # get filenames
- if len(rem_args) == 1:
- skymodel = rem_args[0];
- output = None;
- elif len(rem_args) == 2:
- skymodel,output = rem_args;
- else:
- parser.error("Incorrect number of arguments. Use -h for help.");
-
- if options.app_to_int and options.int_to_app:
- parser.error("Can't use --app-to-int and --int-to-app together.");
- if options.newstar_app_to_int and options.newstar_int_to_app:
- parser.error("Can't use --newstar-app-to-int and --newstar-int-to-app together.");
-
- global measures_dmdq;
- measures_dmdq = None;
-
- def pyrap_dmdq ():
- """Helper function: imports pyrap.measures, and returns dm,dq objects""";
- global measures_dmdq;
- if measures_dmdq is None:
- try:
- import pyrap.measures
- import pyrap.quanta
- except:
- traceback.print_exc();
- print "Failed to import pyrap.measures, which is required by one of the options you specified."
- print "You probably need to install the 'pyrap' package for this to work."
- sys.exit(1);
- measures_dmdq = pyrap.measures.measures(),pyrap.quanta
- return measures_dmdq;
-
- def convert_coordinates (coords):
- """Converts a measures coordinate string into a ra,dec pair (radians at J2000)""";
- match = re.match("^([\d.]+)(rad|deg|),([-]?[\d.]+)(rad|deg|)$",coords);
- if match:
- ra = float(match.group(1));
- dec = float(match.group(3));
- return ra*(DEG if match.group(2) == "deg" else 1),dec*(DEG if match.group(4) == "deg" else 1);
- dm,dq = pyrap_dmdq();
- try:
- coord_dir = dm.direction(*(coords.split(',')));
- coord_dir = dm.measure(coord_dir,'j2000');
- qq = dm.get_value(coord_dir);
- return [ q.get_value('rad') for q in qq ];
- except:
- print "Error parsing or converting coordinate string '%s', see traceback:"%coords;
- traceback.print_exc();
- sys.exit(1);
-
- # figure out center and recenter option
- if options.recenter:
- recenter_radec = convert_coordinates(options.recenter);
- if options.center:
- center_radec = convert_coordinates(options.center);
- options.refresh_r = True;
- else:
- center_radec = None;
-
-
- # check the 'select' option
- select_predicates = {
- '=':lambda x,y:x==y,
- '==':lambda x,y:x==y,
- '!=':lambda x,y:x!=y,
- '>=':lambda x,y:x>=y,
- '<=':lambda x,y:x<=y,
- '>' :lambda x,y:x>y,
-    '<' :lambda x,y:x<y,
-    '.eq.':lambda x,y:x==y,
-    '.ne.':lambda x,y:x!=y,
-    '.ge.':lambda x,y:x>=y,
-    '.le.':lambda x,y:x<=y,
-    '.gt.' :lambda x,y:x>y,
-    '.lt.' :lambda x,y:x<y
-  };
-  select_units = dict(d=DEG,m=DEG/60,s=DEG/3600);
-
-  selections = [];
-  for selstr in (options.select or []):
-    match = re.match("^(?i)([^=<>!.]+)(%s)([^dms]+)([dms])?"%"|".join([ key.replace('.','\.') for key in select_predicates.keys()]),selstr);
- if not match:
- parser.error("Malformed --select string '%s'."%selstr);
+ OR: give a set of FITS primary beam patterns of the form e.g. FILENAME_$(xy)_$(reim).fits, these are the same FITS files used in MeqTrees pybeams_fits.""")
+ group.add_option("--linear-pol", action="store_true",
+ help="Use XY basis correlations for beam filenames and Mueller matrices. Default is RL.")
+ group.add_option("--fits-l-axis", type="string", default="-X",
+ help="CTYPE for L axis in the FITS PB file. Note that our internal L points East (increasing RA), if the "
+                          "FITS beam axis points the opposite way, prefix the CTYPE with a '-' character.")
+ group.add_option("--fits-m-axis", type="string", default="Y",
+ help="CTYPE for M axis in the FITS PB file. Note that our internal M points North (increasing Dec), if the "
+                          "FITS beam axis points the opposite way, prefix the CTYPE with a '-' character.")
+ group.add_option("--beam-freq", type="float", metavar="MHz",
+ help="use given frequency for primary beam model, rather than the model reference frequency")
+ group.add_option("--beam-clip", type="float", metavar="GAIN", default=0.001,
+ help="when using a FITS beam, clip (power) beam gains at this level to keep intrinsic source fluxes from blowing up. Sources below this beamgain will be tagged 'nobeam'. Default: %default")
+ group.add_option("--beam-spi", type="float", metavar="MHz",
+ help="perform a spectral index fit to each source based on a frequency dependent FITS beam, requires --primary-beam option to be used with a FITS file. " +
+ "Apply this spectral index to LSM sources. " +
+ "Must supply a band width (centred on --beam-freq) over which the beam spi is estimated")
+ group.add_option("--force-beam-spi-wo-spectrum", action="store_true",
+ help="apply beam-derived spectral indices even to sources without an intrinsic spectrum. Default " +
+ "is to only apply to sources that already have a spectrum."
+ )
+ group.add_option("--beam-nopol", action="store_true",
+ help="apply intensity beam model only, ignoring polarization. Default is to use polarization."
+ )
+ group.add_option("--beam-diag", action="store_true",
+ help="use diagonal Jones terms only for beam model. Default is to use all four terms if available."
+ )
+ group.add_option("--pa", type="float", default=None,
+ help="Rotate the primary beam pattern through a parallactic angle (in degrees).")
+ group.add_option("--pa-range", type="str", default=None, metavar="FROM,TO",
+ help="Rotate the primary beam pattern through a range of parallactic angles (in degrees) and use the average value over PA.")
+ group.add_option("--pa-from-ms", type="str", default=None, metavar="MS1[:FIELD1],MS2:[FIELD2],...",
+ help="Rotate the primary beam pattern through a range of parallactic angles as given by the MS and field ID (default 0), " +
+ "and take the average over time. This is more accurate than --pa-range.")
+ group.add_option("--beam-average-jones", action="store_true",
+ help="Correct approach to rotational averaging is to convert Jones(PA) to Mueller(PA), then average " +
+ "over PA. Tigger versions<=1.3.3 took the incorrect approach of averaging Jones over PA, then converting " +
+ "to Mueller. Use this option to mimic the old approach.")
+
+ group = OptionGroup(parser, "Options to cluster and rename sources")
+ parser.add_option_group(group)
+ group.add_option("--cluster-dist", type="float", metavar="ARCSEC",
+ help="Distance parameter for source clustering, 0 to disable. Default is %default.")
+ group.add_option("--rename", action="store_true",
+ help="Rename sources according to the COPART (cluster ordering, P.A., radial distance, type) scheme")
+ group.add_option("--radial-step", type="float", metavar="ARCMIN",
+                     help="Size of one step in radial distance for the COPART scheme. Default is %default.")
+ group.add_option("--merge-clusters", type="string", metavar="TAG(S)",
+ help="Merge source clusters bearing the specified tags, replacing them with a " + "single point source. Multiple tags may be given separated by commas. " +
+ "Use 'ALL' to merge all clusters.")
+ group.add_option("--prefix", type="string",
+ help="Prefix all source names with the given string")
+
+ group = OptionGroup(parser, "Other model manipulation options")
+ parser.add_option_group(group)
+ group.add_option("--remove-source", type="string", action="append",
+ metavar="NAME",
+ help="Removes the named source(s) from the model. NAME may contain * and ? wildcards.")
+ group.add_option("--add-brick", type="string", action="append",
+ metavar="NAME:FILE[:PAD_FACTOR:[TAGS:...]]",
+ help="Adds a uv-brick to the model. NAME is a source name, FILE is a " +
+ "FITS file, PAD_FACTOR is set to 1 if not specified. TAGS is a list of boolean flags.")
+ group.add_option("--recenter", type="string", metavar='COORDINATES',
+ help="Shift the sky model from the nominal center to a different field center. COORDINATES specified as per the --center option.")
+
+ group = OptionGroup(parser, "Debugging and verbosity options")
+ parser.add_option_group(group)
+ group.add_option("-v", "--verbose", action="count",
+ help="increases verbosity.")
+ group.add_option("-d", "--debug", dest="debug", type="string", action="append", metavar="Context=Level",
+ help="(for debugging Python code) sets verbosity level of the named Python context. May be used multiple times.")
+ group.add_option("--enable-plots", action="store_true",
+ help="enables various diagnostic plots")
+
+ parser.set_defaults(cluster_dist=60, min_extent=0, format=None, type='auto', output_type='auto', radial_step=10,
+ ref_freq=-1)
+
+ (options, rem_args) = parser.parse_args()
+ min_extent = (options.min_extent / 3600) * DEG
+
+ if options.help_format:
+ print ASCII.FormatHelp
+ sys.exit(0)
+
+ # get filenames
+ if len(rem_args) == 1:
+ skymodel = rem_args[0]
+ output = None
+ elif len(rem_args) == 2:
+ skymodel, output = rem_args
+ else:
+ parser.error("Incorrect number of arguments. Use -h for help.")
+
+ if options.app_to_int and options.int_to_app:
+ parser.error("Can't use --app-to-int and --int-to-app together.")
+ if options.newstar_app_to_int and options.newstar_int_to_app:
+ parser.error("Can't use --newstar-app-to-int and --newstar-int-to-app together.")
+
+ global measures_dmdq
+ measures_dmdq = None
+
+
+ def pyrap_dmdq():
+ """Helper function: imports pyrap.measures, and returns dm,dq objects"""
+ global measures_dmdq
+ if measures_dmdq is None:
+ try:
+ import pyrap.measures
+ import pyrap.quanta
+ except:
+ traceback.print_exc()
+ print "Failed to import pyrap.measures, which is required by one of the options you specified."
+ print "You probably need to install the 'pyrap' package for this to work."
+ sys.exit(1)
+ measures_dmdq = pyrap.measures.measures(), pyrap.quanta
+ return measures_dmdq
+
+
+ def convert_coordinates(coords):
+ """Converts a measures coordinate string into a ra,dec pair (radians at J2000)"""
+ match = re.match("^([\d.]+)(rad|deg|),([-]?[\d.]+)(rad|deg|)$", coords)
+ if match:
+ ra = float(match.group(1))
+ dec = float(match.group(3))
+ return ra * (DEG if match.group(2) == "deg" else 1), dec * (DEG if match.group(4) == "deg" else 1)
+ dm, dq = pyrap_dmdq()
+ try:
+ coord_dir = dm.direction(*(coords.split(',')))
+ coord_dir = dm.measure(coord_dir, 'j2000')
+ qq = dm.get_value(coord_dir)
+ return [q.get_value('rad') for q in qq]
+ except:
+ print "Error parsing or converting coordinate string '%s', see traceback:" % coords
+ traceback.print_exc()
+ sys.exit(1)
+
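+    # Illustrative examples of convert_coordinates() (the second form assumes pyrap is
+    # installed, since it goes through pyrap.measures):
+    #   convert_coordinates("10deg,-30deg")              -> (10 * DEG, -30 * DEG)
+    #   convert_coordinates("j2000,1h5m0.2s,+30d14m15s") -> (ra, dec) in J2000 radians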
+
+ # figure out center and recenter option
+ if options.recenter:
+ recenter_radec = convert_coordinates(options.recenter)
+ if options.center:
+ center_radec = convert_coordinates(options.center)
+ options.refresh_r = True
+ else:
+ center_radec = None
+
+ # check the 'select' option
+ select_predicates = {
+ '=': lambda x, y: x == y,
+ '==': lambda x, y: x == y,
+ '!=': lambda x, y: x != y,
+ '>=': lambda x, y: x >= y,
+ '<=': lambda x, y: x <= y,
+ '>': lambda x, y: x > y,
+ '<': lambda x, y: x < y,
+ '.eq.': lambda x, y: x == y,
+ '.ne.': lambda x, y: x != y,
+ '.ge.': lambda x, y: x >= y,
+ '.le.': lambda x, y: x <= y,
+ '.gt.': lambda x, y: x > y,
+ '.lt.': lambda x, y: x < y
+ }
+ select_units = dict(d=DEG, m=DEG / 60, s=DEG / 3600)
+
+ selections = []
+ for selstr in (options.select or []):
+ match = re.match("^(?i)([^=<>!.]+)(%s)([^dms]+)([dms])?" % "|".join(
+ [key.replace('.', '\.') for key in select_predicates.keys()]), selstr)
+ if not match:
+ parser.error("Malformed --select string '%s'." % selstr)
+ try:
+ value = float(match.group(3))
+ except:
+ parser.error("Malformed --select string '%s': right-hand side is not a number." % selstr)
+ scale = select_units.get(match.group(4), 1.)
+ selections.append((selstr, match.group(1), select_predicates[match.group(2).lower()], value * scale))
+
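+    # For instance (illustrative), --select "r<5d" is parsed into the tuple
+    # ("r<5d", "r", select_predicates["<"], 5 * DEG): keep sources whose 'r' attribute
+    # is below 5 degrees, with the unit suffix converted to radians via select_units.
+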
+ # figure out input type
try:
- value = float(match.group(3));
+ input_type, import_func, dum, input_doc = Tigger.Models.Formats.resolveFormat(skymodel,
+ options.type if options.type != AUTO else None)
except:
- parser.error("Malformed --select string '%s': right-hand side is not a number."%selstr);
- scale = select_units.get(match.group(4),1.);
- selections.append((selstr,match.group(1),select_predicates[match.group(2).lower()],value*scale));
-
- # figure out input type
- try:
- input_type,import_func,dum,input_doc = Tigger.Models.Formats.resolveFormat(skymodel,options.type if options.type != AUTO else None);
- except:
- print "Unable to determine model type for %s, please specify one explicitly with the -t/--type option."%skymodel;
- sys.exit(1);
-
- # figure out output type, if explicitly specified
- output_type = None;
- if output is None and options.output_type == AUTO:
- options.output_type = "Tigger";
-
- if options.output_type != AUTO:
- output_type,dum,export_func,output_doc = Tigger.Models.Formats.getFormat(options.output_type);
- output_extensions = Tigger.Models.Formats.getFormatExtensions(options.output_type);
- if not export_func or not extensions:
- print "Output model type '%s' is not supported."%options.output_type;
- sys.exit(1);
-
- # figure out output name, if not specified
- if output is None:
- if not output_type:
- print "An output filename and/or an explicit output model type (-o/--output-type) must be specfified.";
- sys.exit(1);
- # get base input name
- # if input extension is "lsm.html", then split off two extensions, not just one
- basename = os.path.splitext(skymodel)[0];
- if skymodel.endswith(".lsm.html"):
- basename = os.path.splitext(basename)[0];
- output = basename + output_extensions[0];
- # else output name is specified, use this to determine format unless it is explicitly set
- elif not output_type:
+ print "Unable to determine model type for %s, please specify one explicitly with the -t/--type option." % skymodel
+ sys.exit(1)
+
+ # figure out output type, if explicitly specified
+ output_type = None
+ if output is None and options.output_type == AUTO:
+ options.output_type = "Tigger"
+
+ if options.output_type != AUTO:
+ output_type, dum, export_func, output_doc = Tigger.Models.Formats.getFormat(options.output_type)
+ output_extensions = Tigger.Models.Formats.getFormatExtensions(options.output_type)
+        if not export_func or not output_extensions:
+ print "Output model type '%s' is not supported." % options.output_type
+ sys.exit(1)
+
+ # figure out output name, if not specified
+ if output is None:
+ if not output_type:
+            print "An output filename and/or an explicit output model type (-o/--output-type) must be specified."
+ sys.exit(1)
+ # get base input name
+ # if input extension is "lsm.html", then split off two extensions, not just one
+ basename = os.path.splitext(skymodel)[0]
+ if skymodel.endswith(".lsm.html"):
+ basename = os.path.splitext(basename)[0]
+ output = basename + output_extensions[0]
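+        # e.g. (illustrative) an input named 'foo.lsm.html' is reduced to the basename
+        # 'foo', so the output becomes 'foo' plus the first registered extension of the
+        # chosen output format.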
+ # else output name is specified, use this to determine format unless it is explicitly set
+ elif not output_type:
+ try:
+ output_type, dum, export_func, output_doc = Tigger.Models.Formats.resolveFormat(output, None)
+ except:
+ export_func = None
+ if not export_func:
+ print "Unable to determine model type for %s, please specify one explicitly with the -o/--output-type option." % output
+ sys.exit(1)
+
+ # check if we need to overwrite
+ if os.path.exists(output) and not options.force:
+ print "Output file %s already exists. Use the -f switch to overwrite." % output
+ sys.exit(1)
+
+ print "Reading %s (%s)" % (skymodel, input_doc)
+
+ # load the model
try:
- output_type,dum,export_func,output_doc = Tigger.Models.Formats.resolveFormat(output,None);
- except:
- export_func = None;
- if not export_func:
- print "Unable to determine model type for %s, please specify one explicitly with the -o/--output-type option."%output;
- sys.exit(1);
-
- # check if we need to overwrite
- if os.path.exists(output) and not options.force:
- print "Output file %s already exists. Use the -f switch to overwrite."%output;
- sys.exit(1);
-
- print "Reading %s (%s)"%(skymodel,input_doc);
-
- # load the model
- try:
- model = import_func(skymodel,min_extent=min_extent,format=options.format,center=center_radec,verbose=options.verbose);
- except Exception,exc:
- if options.verbose:
- traceback.print_exc();
- print "Error loading model:",str(exc);
- sys.exit(1);
- sources = model.sources;
- if not sources:
- print "Input model %s contains no sources"%skymodel;
- else:
- print "Model contains %d sources"%len(sources);
-
- # append, if specified
- if options.append:
- for modelnum,filename in enumerate(options.append):
- # figure out input type
- try:
- append_type,append_func,dum,append_doc = Tigger.Models.Formats.resolveFormat(filename,
- options.append_type if options.append_type != AUTO else None);
- except:
- print "Unable to determine model type for %s, please specify one explicitly with the --append-type option."%filename;
- sys.exit(1);
- print "Reading %s (%s)"%(filename,append_doc);
- # read model to be appended
- model2 = append_func(filename,min_extent=min_extent,format=options.append_format or options.format);
- if model2.sources:
- sources += model2.sources;
- for src in model2.sources:
- src.name = "M%d:%s"%(modelnum,src.name);
- # recompute 'r' attribute (unless --center is in effect, in which case it's going to be done anyway below)
- if options.refresh_r:
- for src in model2.sources:
- src.setAttribute('r',Coordinates.angular_dist_pos_angle(ra0,dec0,*model.fieldCenter())[0]);
- print "Appended %d sources from %s (%s)"%(len(model2.sources),filename,append_doc);
-
- # apply center, if specified
- if options.center:
- print "Center of field set to %s"%options.center;
- model.setFieldCenter(*center_radec);
-
- # apply selection by tag
- if options.tags:
- tags = []
- for ot in options.tags:
- tags += ot.split(",")
- for tag in tags:
- sources = [ src for src in sources if getattr(src,tag,False) ]
- if not sources:
- print "No sources left after selection by tag (-T/--tag) has been applied.";
- sys.exit(0);
- print "Selection by tag (%s) reduces this to %d sources"%(", ".join(options.tags),len(sources));
-
- # apply selection by NaN
- if options.remove_nans:
- sources = [ src for src in sources if not any([ math.isnan(x)
- for x in src.pos.ra,src.pos.dec,src.flux.I ]) ];
+ model = import_func(skymodel, min_extent=min_extent, format=options.format, center=center_radec,
+ verbose=options.verbose)
+ except Exception, exc:
+ if options.verbose:
+ traceback.print_exc()
+ print "Error loading model:", str(exc)
+ sys.exit(1)
+ sources = model.sources
if not sources:
- print "No sources left after applying --remove-nans.";
- sys.exit(0);
- print "Removing NaN positions and fluxes reduces this to %d sources"%len(sources);
-
- # remove sources
- if options.remove_source:
- import fnmatch
- remove_names = set();
- for patt in options.remove_source:
- if patt[0] == "'" and patt[-1] == "'":
- patt = patt[1:-1]
- match = fnmatch.filter([src.name for src in sources],patt.replace("\\",""))
- remove_names.update(match);
- print "Removing sources: %s matches %s"%(patt,",".join(sorted(match)));
- sources = [ src for src in sources if src.name not in remove_names ];
-
- # add brick
- if options.add_brick:
- for brickspec in options.add_brick:
- # get names, check for uniqueness
- try:
- ff = brickspec.split(':');
- srcname = ff[0];
- fitsfile = ff[1];
- pad = float(ff[2] or '1') if len(ff)>2 else 1;
- tags = ff[3:] if len(ff)>3 else [];
- except:
- parser.error("Invalid --add-brick setting %s"%brickspec);
- if [ src.name for src in sources if src.name == name ]:
- print "Error: model already contains a source named '%s'"%name;
- # add brick
- from astropy.io import fits as pyfits
- from astLib.astWCS import WCS
- input_hdu = pyfits.open(fitsfile)[0];
- hdr = input_hdu.header;
- max_flux = float(input_hdu.data.max());
- wcs = WCS(hdr,mode='pyfits');
- # Get reference pixel coordinates
- # wcs.getCentreWCSCoords() doesn't work, as that gives us the middle of the image
- # So scan the header to get the CRPIX values
- ra0 = dec0 = 1;
- for iaxis in range(hdr['NAXIS']):
- axs = str(iaxis+1);
- name = hdr.get('CTYPE'+axs,axs).upper();
- if name.startswith("RA"):
- ra0 = hdr.get('CRPIX'+axs,1)-1;
- elif name.startswith("DEC"):
- dec0 = hdr.get('CRPIX'+axs,1)-1;
- # convert pixel to degrees
- ra0,dec0 = wcs.pix2wcs(ra0,dec0);
- ra0 *= DEG;
- dec0 *= DEG;
- sx,sy = wcs.getHalfSizeDeg();
- sx *= DEG;
- sy *= DEG;
- nx,ny = input_hdu.data.shape[-1:-3:-1];
- from Tigger.Models import ModelClasses,SkyModel
- pos = ModelClasses.Position(ra0,dec0);
- flux = ModelClasses.Flux(max_flux);
- shape = ModelClasses.FITSImage(sx,sy,0,fitsfile,nx,ny,pad=pad);
- source = SkyModel.Source(srcname,pos,flux,shape=shape);
- for tag in tags:
- source.setAttribute(tag,True);
- if not options.refresh_r:
- source.setAttribute('r',Coordinates.angular_dist_pos_angle(ra0,dec0,*model.fieldCenter())[0]);
- sources.append(source);
- print "Adding FITS source %s (%s,pad=%f) with tags %s"%(srcname,fitsfile,pad,tags);
-
- # convert apparent flux to intrinsic using the NEWSTAR beam gain
- if options.newstar_app_to_int:
- nsrc = 0;
- for src in sources:
- bg = getattr(src,'newstar_beamgain',None);
- if getattr(src,'flux_apparent',None) and bg is not None:
- src.setAttribute('Iapp',src.flux.I);
- for pol in 'IQUV':
- if hasattr(src.flux,pol):
- setattr(src.flux,pol,getattr(src.flux,pol)/bg);
- src.removeAttribute('flux_apparent');
- src.setAttribute('flux_intrinsic',True);
- nsrc += 1;
- print "Converted NEWSTAR apparent to intrinsic flux for %d model sources"%nsrc;
- if len(sources) != nsrc:
- print " (%d sources were skipped for whatever reason.)"%(len(model.sources)-nsrc);
- elif options.newstar_int_to_app:
- nsrc = 0;
- for src in sources:
- bg = getattr(src,'newstar_beamgain',None);
- if getattr(src,'flux_intrinsic',None) and bg is not None:
- src.setAttribute('Iapp',src.flux.I*bg);
- for pol in 'IQUV':
- if hasattr(src.flux,pol):
- setattr(src.flux,pol,getattr(src.flux,pol)*bg);
- src.removeAttribute('flux_intrinsic');
- src.setAttribute('flux_apparent',True);
- nsrc += 1;
- print "Converted NEWSTAR apparent to intrinsic flux for %d model sources"%nsrc;
- if len(sources) != nsrc:
- print " (%d sources were skipped for whatever reason.)"%(len(model.sources)-nsrc);
-
- # set refrence frequency
- if options.ref_freq >= 0:
- model.setRefFreq(options.ref_freq*1e+6);
- print "Setting reference frequency to %f MHz"%options.ref_freq;
-
- # recenter
- if options.recenter:
- print "Shifting model to new center %s"%options.recenter;
- ra0,dec0 = model.fieldCenter();
- field_center = ra1,dec1 = recenter_radec;
- ddec = dec1 - dec0;
- cosd0,sind0 = math.cos(ddec),math.sin(ddec);
- for src in sources:
- ra,dec = src.pos.ra,src.pos.dec;
- x,y,z = math.cos(ra-ra0)*math.cos(dec),math.sin(ra-ra0)*math.cos(dec),math.sin(dec);
- x1 = cosd0*x - sind0*z;
- y1 = y;
- z1 = sind0*x + cosd0*z;
- src.pos.ra = ra1 + (math.atan2(y1,x1) if (x1 or y1) else 0);
- src.pos.dec = math.asin(z1);
- # reset model center
- model.setFieldCenter(ra1,dec1);
-
- # recompute radial distance
- if options.refresh_r:
- print "Recomputing the 'r' attribute based on the field center";
- model.recomputeRadialDistance();
-
- # select
- def getTagValue (src,tag):
- """Helper function: looks for the given tag in the source, or in its sub-objects""";
- for obj in src,src.pos,src.flux,getattr(src,'shape',None),getattr(src,'spectrum',None):
- if obj is not None and hasattr(obj,tag):
- return getattr(obj,tag);
- return None;
-
- for selstr,tag,predicate,value in selections:
- # get tag value
- srctag = [ (src,getTagValue(src,tag)) for src in model.sources ];
- sources = [ src for src,tag in srctag if tag is not None and predicate(tag,value) ];
- print "Selection '%s' leaves %d out of %d sources"%(selstr,len(sources),len(model.sources));
- if len(sources) != len(model.sources):
- model.setSources(sources);
-
- # set PB expression and estimate apparent fluxes
- pb = options.primary_beam;
- if pb == "refresh":
- pb = model.primaryBeam();
- if pb:
- print "Recalculating apparent fluxes";
- else:
- print "No primary beam expression in model, ignoring '--primary-beam refresh' option";
- if options.app_to_int or options.int_to_app:
- pb = pb or model.primaryBeam();
- if pb:
- print "Converting apparent fluxes to intrinsic" if options.app_to_int else "Converting intrinsic fluxes to apparent";
+ print "Input model %s contains no sources" % skymodel
else:
- print "No primary beam expression in model and no --primary-beam option given, cannot convert between apparent and intrinsic.";
- sys.exit(1);
- if pb:
- fitsBeam=False
- if pb.lower().endswith('.fits'): #if pb is a FITS file, load interpolator
- fitsBeam=True
-
- #Following code is nicked from Cattery/Siamese/OMS/pybeams_fits.py
- CORRS_XY = "xx","xy","yx","yy"
- CORRS_RL = "rr","rl","lr","ll"
- REIM = "re","im";
- REALIMAG = dict(re="real",im="imag");
-
- # get the Cattery
- for varname in 'CATTERY_PATH',"MEQTREES_CATTERY_PATH":
- if varname in os.environ:
- sys.path.append(os.environ[varname])
-
- import Siamese.OMS.Utils as Utils
- import Siamese
-
- def make_beam_filename (filename_pattern,corr,reim):
- """Makes beam filename for the given correlation and real/imaginary component (one of "re" or "im")"""
- return Utils.substitute_pattern(filename_pattern,
- corr=corr.lower(),xy=corr.lower(),CORR=corr.upper(),XY=corr.upper(),
- reim=reim.lower(),REIM=reim.upper(),ReIm=reim.title(),
- realimag=REALIMAG[reim].lower(),REALIMAG=REALIMAG[reim].upper(),
- RealImag=REALIMAG[reim].title());
-
- """Makes beam interpolator node for the given filename pattern."""
- filename_real = []
- filename_imag = []
- #load beam interpolator
- import Siamese.OMS.InterpolatedBeams as InterpolatedBeams
- vbs=[]
- for icorr,corr in enumerate( CORRS_XY if options.linear_pol else CORRS_RL ):
- if icorr in (1,2):
- print ' omitting %s beam due to --beam-diag'%corr
- vbs.append(0)
+ print "Model contains %d sources" % len(sources)
+
+ # append, if specified
+ if options.append:
+ for modelnum, filename in enumerate(options.append):
+ # figure out input type
+ try:
+ append_type, append_func, dum, append_doc = Tigger.Models.Formats.resolveFormat(filename,
+ options.append_type if options.append_type != AUTO else None)
+ except:
+ print "Unable to determine model type for %s, please specify one explicitly with the --append-type option." % filename
+ sys.exit(1)
+ print "Reading %s (%s)" % (filename, append_doc)
+ # read model to be appended
+ model2 = append_func(filename, min_extent=min_extent, format=options.append_format or options.format)
+ if model2.sources:
+ sources += model2.sources
+ for src in model2.sources:
+ src.name = "M%d:%s" % (modelnum, src.name)
+ # recompute 'r' attribute (unless --center is in effect, in which case it's going to be done anyway below)
+ if options.refresh_r:
+ for src in model2.sources:
+                    src.setAttribute('r', Coordinates.angular_dist_pos_angle(src.pos.ra, src.pos.dec, *model.fieldCenter())[0])
+ print "Appended %d sources from %s (%s)" % (len(model2.sources), filename, append_doc)
+
+ # apply center, if specified
+ if options.center:
+ print "Center of field set to %s" % options.center
+ model.setFieldCenter(*center_radec)
+
+ # apply selection by tag
+ if options.tags:
+ tags = []
+ for ot in options.tags:
+ tags += ot.split(",")
+ for tag in tags:
+ sources = [src for src in sources if getattr(src, tag, False)]
+ if not sources:
+ print "No sources left after selection by tag (-T/--tag) has been applied."
+ sys.exit(0)
+ print "Selection by tag (%s) reduces this to %d sources" % (", ".join(options.tags), len(sources))
+
+ # apply selection by NaN
+ if options.remove_nans:
+ sources = [src for src in sources if not any([math.isnan(x)
+ for x in src.pos.ra, src.pos.dec, src.flux.I])]
+ if not sources:
+ print "No sources left after applying --remove-nans."
+ sys.exit(0)
+ print "Removing NaN positions and fluxes reduces this to %d sources" % len(sources)
+
+ # remove sources
+ if options.remove_source:
+ import fnmatch
+
+ remove_names = set()
+ for patt in options.remove_source:
+ if patt[0] == "'" and patt[-1] == "'":
+ patt = patt[1:-1]
+ match = fnmatch.filter([src.name for src in sources], patt.replace("\\", ""))
+ remove_names.update(match)
+ print "Removing sources: %s matches %s" % (patt, ",".join(sorted(match)))
+ sources = [src for src in sources if src.name not in remove_names]
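+    # e.g. (illustrative) --remove-source "J1*" drops every source whose name matches the
+    # shell-style pattern J1* via fnmatch, after stripping any surrounding single quotes.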
+
+ # add brick
+ if options.add_brick:
+ for brickspec in options.add_brick:
+ # get names, check for uniqueness
+ try:
+ ff = brickspec.split(':')
+ srcname = ff[0]
+ fitsfile = ff[1]
+ pad = float(ff[2] or '1') if len(ff) > 2 else 1
+ tags = ff[3:] if len(ff) > 3 else []
+ except:
+ parser.error("Invalid --add-brick setting %s" % brickspec)
+            if [src.name for src in sources if src.name == srcname]:
+                print "Error: model already contains a source named '%s'" % srcname
+ # add brick
+ from astropy.io import fits as pyfits
+ from astLib.astWCS import WCS
+
+ input_hdu = pyfits.open(fitsfile)[0]
+ hdr = input_hdu.header
+ max_flux = float(input_hdu.data.max())
+ wcs = WCS(hdr, mode='pyfits')
+ # Get reference pixel coordinates
+ # wcs.getCentreWCSCoords() doesn't work, as that gives us the middle of the image
+ # So scan the header to get the CRPIX values
+ ra0 = dec0 = 1
+ for iaxis in range(hdr['NAXIS']):
+ axs = str(iaxis + 1)
+ name = hdr.get('CTYPE' + axs, axs).upper()
+ if name.startswith("RA"):
+ ra0 = hdr.get('CRPIX' + axs, 1) - 1
+ elif name.startswith("DEC"):
+ dec0 = hdr.get('CRPIX' + axs, 1) - 1
+ # convert pixel to degrees
+ ra0, dec0 = wcs.pix2wcs(ra0, dec0)
+ ra0 *= DEG
+ dec0 *= DEG
+ sx, sy = wcs.getHalfSizeDeg()
+ sx *= DEG
+ sy *= DEG
+ nx, ny = input_hdu.data.shape[-1:-3:-1]
+ from Tigger.Models import ModelClasses, SkyModel
+
+ pos = ModelClasses.Position(ra0, dec0)
+ flux = ModelClasses.Flux(max_flux)
+ shape = ModelClasses.FITSImage(sx, sy, 0, fitsfile, nx, ny, pad=pad)
+ source = SkyModel.Source(srcname, pos, flux, shape=shape)
+ for tag in tags:
+ source.setAttribute(tag, True)
+ if not options.refresh_r:
+ source.setAttribute('r', Coordinates.angular_dist_pos_angle(ra0, dec0, *model.fieldCenter())[0])
+ sources.append(source)
+ print "Adding FITS source %s (%s,pad=%f) with tags %s" % (srcname, fitsfile, pad, tags)
+
+ # convert apparent flux to intrinsic using the NEWSTAR beam gain
+ if options.newstar_app_to_int:
+ nsrc = 0
+ for src in sources:
+ bg = getattr(src, 'newstar_beamgain', None)
+ if getattr(src, 'flux_apparent', None) and bg is not None:
+ src.setAttribute('Iapp', src.flux.I)
+ for pol in 'IQUV':
+ if hasattr(src.flux, pol):
+ setattr(src.flux, pol, getattr(src.flux, pol) / bg)
+ src.removeAttribute('flux_apparent')
+ src.setAttribute('flux_intrinsic', True)
+ nsrc += 1
+ print "Converted NEWSTAR apparent to intrinsic flux for %d model sources" % nsrc
+ if len(sources) != nsrc:
+ print " (%d sources were skipped for whatever reason.)" % (len(model.sources) - nsrc)
+ elif options.newstar_int_to_app:
+ nsrc = 0
+ for src in sources:
+ bg = getattr(src, 'newstar_beamgain', None)
+ if getattr(src, 'flux_intrinsic', None) and bg is not None:
+ src.setAttribute('Iapp', src.flux.I * bg)
+ for pol in 'IQUV':
+ if hasattr(src.flux, pol):
+ setattr(src.flux, pol, getattr(src.flux, pol) * bg)
+ src.removeAttribute('flux_intrinsic')
+ src.setAttribute('flux_apparent', True)
+ nsrc += 1
+        print "Converted NEWSTAR intrinsic to apparent flux for %d model sources" % nsrc
+ if len(sources) != nsrc:
+ print " (%d sources were skipped for whatever reason.)" % (len(model.sources) - nsrc)
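+
+    # Example of the scaling (illustrative): with newstar_beamgain = 0.5, an intrinsic
+    # I of 2.0 Jy becomes an apparent Iapp = 2.0 * 0.5 = 1.0 Jy in this branch, while
+    # the apparent-to-intrinsic branch above divides the fluxes by the beam gain instead.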
+
+    # set reference frequency
+ if options.ref_freq >= 0:
+ model.setRefFreq(options.ref_freq * 1e+6)
+ print "Setting reference frequency to %f MHz" % options.ref_freq
+
+ # recenter
+ if options.recenter:
+ print "Shifting model to new center %s" % options.recenter
+ ra0, dec0 = model.fieldCenter()
+ field_center = ra1, dec1 = recenter_radec
+ ddec = dec1 - dec0
+ cosd0, sind0 = math.cos(ddec), math.sin(ddec)
+ for src in sources:
+ ra, dec = src.pos.ra, src.pos.dec
+ x, y, z = math.cos(ra - ra0) * math.cos(dec), math.sin(ra - ra0) * math.cos(dec), math.sin(dec)
+ x1 = cosd0 * x - sind0 * z
+ y1 = y
+ z1 = sind0 * x + cosd0 * z
+ src.pos.ra = ra1 + (math.atan2(y1, x1) if (x1 or y1) else 0)
+ src.pos.dec = math.asin(z1)
+ # reset model center
+ model.setFieldCenter(ra1, dec1)
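+        # Sanity check of the rotation above (illustrative): a source at the old centre
+        # (ra0, dec0) has (x, y, z) = (cos(dec0), 0, sin(dec0)), which rotates to
+        # (cos(dec1), 0, sin(dec1)), so it lands exactly on the new centre (ra1, dec1)
+        # away from the poles.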
+
+ # recompute radial distance
+ if options.refresh_r:
+ print "Recomputing the 'r' attribute based on the field center"
+ model.recomputeRadialDistance()
+
+
+ # select
+ def getTagValue(src, tag):
+ """Helper function: looks for the given tag in the source, or in its sub-objects"""
+ for obj in src, src.pos, src.flux, getattr(src, 'shape', None), getattr(src, 'spectrum', None):
+ if obj is not None and hasattr(obj, tag):
+ return getattr(obj, tag)
+ return None
+
+
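+    # E.g. (illustrative) getTagValue(src, 'I') normally returns src.flux.I, because the
+    # flux sub-object is searched whenever the tag is not set on the source itself.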
+ for selstr, tag, predicate, value in selections:
+ # get tag value
+ srctag = [(src, getTagValue(src, tag)) for src in model.sources]
+ sources = [src for src, tag in srctag if tag is not None and predicate(tag, value)]
+ print "Selection '%s' leaves %d out of %d sources" % (selstr, len(sources), len(model.sources))
+ if len(sources) != len(model.sources):
+ model.setSources(sources)
+
+ # set PB expression and estimate apparent fluxes
+ pb = options.primary_beam
+ if pb == "refresh":
+ pb = model.primaryBeam()
+ if pb:
+ print "Recalculating apparent fluxes"
else:
- # make FITS images or nulls for real and imaginary part
- filenames = [ make_beam_filename(pb,corr,'re'), make_beam_filename(pb,corr,'im') ]
- print 'Loading FITS Beams',filenames[0],filenames[1]
- vb = InterpolatedBeams.LMVoltageBeam(verbose=(options.verbose or 0)-2,l_axis=options.fits_l_axis,m_axis=options.fits_m_axis)
- vb.read(*filenames)
- vbs.append(vb)
-
- model.setPrimaryBeam(vbs);
- # get frequency
- # fq = model.refFreq() or 1.4e+9;
- beamRefFreq = (options.beam_freq or 0)*1e+6 or model.refFreq() or 1424500000.12
- print "Using FITS beams with reference frequency %f MHz"%(beamRefFreq*1e-6);
-
- else: #else, assume pb is an expession
- try:
- pbexp = eval('lambda r,fq:'+pb);
- dum = pbexp(0,1e+9); # evaluate at r=0 and 1 GHz as a test
- if not isinstance(dum,float):
- raise TypeError,"does not evaluate to a float";
- except Exception,exc:
- print "Bad primary beam expression '%s': %s"%(pb,str(exc));
- sys.exit(1);
- model.setPrimaryBeam(pb);
- # get frequency
- # fq = model.refFreq() or 1.4e+9;
- fq = (options.beam_freq or 0)*1e+6 or model.refFreq() or 1424500000.12
- print "Using beam expression '%s' with reference frequency %f MHz"%(pb,fq*1e-6);
-
- nsrc = 0;
- # ensure that every source has an 'r' attribute
- if not options.refresh_r:
- for src in sources:
- if not hasattr(src,'r'):
- src.setAttribute('r',Coordinates.angular_dist_pos_angle(src.pos.ra,src.pos.dec,*model.fieldCenter())[0]);
- # evaluate sources
- if not (options.app_to_int or options.int_to_app):
- for src in sources:
- r = getattr(src,'r',None);
- if r is not None:
- bg = pbexp(r,fq);
- src.setAttribute('beamgain',bg);
- src.setAttribute('Iapp',src.flux.I*bg);
- nsrc += 1;
- print "Applied primary beam expression to %d model sources"%nsrc;
- else:
- # precompute PAs if fitsBeams are used
- if fitsBeam:
- if options.pa_from_ms is not None:
- ms_strings = options.pa_from_ms.split(",")
- ms_field = []
- if len(ms_strings)>1:
- for ms_string in ms_strings:
- match = re.match("^(.*?)(:[0-9]+)?$",ms_string);
- if match:
- msname,field = match.group(1), int(match.group(2)[1:]) if match.group(2) else 0;
- else:
- msname,field = options.pa_from_ms,0;
- ms_field.append( (msname, field) )
- else:
- ms_string = ms_strings[0]
- match = re.match("^(.*?)(:[0-9]+)?$",ms_string);
- if match:
- msname,field = match.group(1), int(match.group(2)[1:]) if match.group(2) else 0;
- if os.path.exists(msname+"/SUBMSS"):
- ms_field = [ (ms,field) for ms in glob.glob(msname+"/SUBMSS/*") if os.path.isdir(ms) ];
- else:
- ms_field = [ [msname, 0] ];
- from pyrap.tables import table
- dm,dq = pyrap_dmdq();
- pas = [];
- zenith = dm.direction('AZEL','0deg','90deg')
- for ms,field in ms_field:
- print "Getting PA range from MS %s, field %d"%(ms, field);
- tab = table(ms)
- antpos = table(tab.getkeyword("ANTENNA")).getcol("POSITION");
- ra,dec = table(tab.getkeyword("FIELD")).getcol("PHASE_DIR",field,1)[0][0]
- # make position measure from antenna 0
- pos0 = dm.position('itrf',*[ dq.quantity(x,'m') for x in antpos[0]])
- dm.do_frame(pos0);
- # make direction measure from field centre
- fld = dm.direction('J2000',dq.quantity(ra,"rad"),dq.quantity(dec,"rad"))
- tab = tab.query("FIELD_ID==%d"%field);
- # get unique times
- times = numpy.array(sorted(set(tab.getcol("TIME")[~tab.getcol("FLAG_ROW")])));
- pa1 = [ (dm.do_frame(dm.epoch("UTC",dq.quantity(t,"s"))) and dm.posangle(fld,zenith).get_value("rad")) for t in times ];
- pas += pa1;
- pa1 = numpy.array(pa1)/DEG;
- if options.enable_plots:
- import pylab
- pylab.plot((times-times[0])/3600,pa1);
- pylab.xlabel("Time since beginning of observation, hours")
- pylab.ylabel("PA, degrees");
- pylab.savefig(os.path.basename(ms)+".parangle.png")
- print "Saved plot "+os.path.basename(ms)+".parangle.png"
- print "MS %s, PA range is %fdeg to %fdeg"%(ms,pa1[0],pa1[-1]);
- # get lm's rotated through those ranges
- pa_range = numpy.array(pas);
- elif options.pa_range is not None:
- try:
- ang0,ang1 = map(float,options.pa_range.split(",",1));
- except:
- parser.error("Incorrect --pa-range option. FROM,TO values expected.");
- pa_range = numpy.arange(ang0,ang1+1,1)*DEG
- elif options.pa is not None:
- pa_range = options.pa*DEG;
+ print "No primary beam expression in model, ignoring '--primary-beam refresh' option"
+ if options.app_to_int or options.int_to_app:
+ pb = pb or model.primaryBeam()
+ if pb:
+ print "Converting apparent fluxes to intrinsic" if options.app_to_int else "Converting intrinsic fluxes to apparent"
else:
- pa_range = None;
- if options.verbose:
- print "PA (deg):"," ".join([ "%f"%(x/DEG) for x in pa_range ]) if numpy.iterable(pa_range) else pa_range
- if options.enable_plots:
- import pylab
- pylab.figure()
- for src in sources:
- r = getattr(src,'r',None);
- if r is not None:
- if fitsBeam:
- #this is where the interpolator is called to determine the beam gain
- #AIPS Memo 27 Sin Projection
- ra0,dec0 = model.fieldCenter()
- #ra0 = sources[0].pos.ra
- #dec0 = sources[0].pos.dec
- l = math.cos(src.pos.dec)*math.sin(src.pos.ra-ra0)
- m = math.sin(src.pos.dec)*math.cos(dec0)-math.cos(src.pos.dec)*math.sin(dec0)*math.cos(src.pos.ra-ra0)
-
- # rotate through (range of) PA value(s), if such option is supplied above
- if pa_range is not None:
- l,m = rotatelm(l,m,pa_range);
-
- Jones2Mueller = Jones2Mueller_linear if options.linear_pol else Jones2Mueller_circular
-
- jones = [ vb.interpolate(l,m,freq=beamRefFreq) if vb else numpy.array(0) for vb in vbs ]
- # incorrect old-style Jones averaging
- if options.beam_average_jones:
- a,b,c,d = [ j.mean() for j in jones ]
- mueller = Jones2Mueller(numpy.matrix([[a,b],[c,d]]))
- if options.verbose > 1:
- print "%s: jones11 mean %f std %f"%(src.name,abs(a),abs(jones[0]).std())
- print "%s: jones22 mean %f std %f"%(src.name,abs(d),abs(jones[3]).std())
- if options.enable_plots:
- pylab.plot(abs(jones[0]),label="|J11| "+src.name)
- # new-style averaging of Mueller matrix
- else:
- muellers = [ Jones2Mueller(numpy.matrix([[a,b],[c,d]])) for a,b,c,d in numpy.broadcast(*jones) ]
- mueller = sum(muellers) / len(muellers)
- if options.enable_plots:
- pylab.plot([ m[0,0] for m in muellers ],label='M11 '+src.name)
- if options.verbose > 1:
- print "%s: jones11 mean %f std %f"%(src.name,abs(jones[0].mean()),abs(jones[0]).std())
- print "%s: jones22 mean %f std %f"%(src.name,abs(jones[3].mean()),abs(jones[3]).std())
- print "%s: mueller11 mean %f std %f"%(src.name,mueller[0,0],numpy.std([ m[0,0] for m in muellers ]))
- bg = mueller[0,0]
- ## OMS 6/7/2015: let's do full inversion now to correct all four polarizations
- if options.app_to_int:
- if options.beam_nopol:
- mueller = 1/bg
- else:
- mueller = numpy.linalg.inv(mueller)
- else:
- if options.beam_nopol:
- mueller = bg
- ## #for now, ignore full Stokes and just use Stokes' I
- # src.setAttribute('beamgain',bg);
- nobeam = ( bg < options.beam_clip );
- spi = freqgrid = spiBg = None;
- # if no beam gain at this position, set appropriate tag
- if nobeam:
- src.setAttribute('nobeam',True);
- src.setAttribute('Iapp',src.flux.I);
- else:
- src.removeAttribute('nobeam');
- src.setAttribute('beamgain',bg);
- iquv0 = numpy.matrix([[getattr(src.flux,stokes,0.)] for stokes in "IQUV" ])
- iquv = mueller*iquv0
- if options.verbose > 1:
- print "%s: from %s to %s" % (src.name, iquv0.T, iquv.T)
- if options.app_to_int and hasattr(src.flux,"I"):
- src.setAttribute("Iapp",src.flux.I)
- for i,stokes in enumerate("IQUV"):
- if hasattr(src.flux, stokes):
- setattr(src.flux, stokes, iquv[i,0])
- #add spectral index of position in the beam
- src_spectrum = getattr(src,'spectrum',None);
- if options.beam_spi and (src_spectrum or options.force_beam_spi_wo_spectrum):
- #determine spectral index by determining bg across the freqs (using only Stokes' I)
- import scipy.optimize
- bw = options.beam_spi*1e+6/2;
- # make a frequency grid of 10 points across the band
- #freqgrid = numpy.arange(beamRefFreq-bw,beamRefFreq+bw,bw/5);
- freqgrid = numpy.arange(beamRefFreq-bw,beamRefFreq+bw*1.01,bw/5)
- gxx=vbs[0].interpolate(l,m,freq=freqgrid,freqaxis=2)
- gyy=vbs[3].interpolate(l,m,freq=freqgrid,freqaxis=2)
- spiBg=(gxx*gxx.conj()+gyy*gyy.conj()).real
- spiBg=spiBg[:,0,:]
- #power law fit
- logbg1=numpy.log10(spiBg)
- logbg=numpy.log10(spiBg.mean(axis=0))
- logfreq=numpy.log10(freqgrid)
- fitfunc = lambda p, x: p[0] + p[1] * x
- errfunc = lambda p, x, y: (y - fitfunc(p, x))
- pinit=[10**logbg[0],0.]
- if numpy.isinf(logbg).sum()>0:
- spi=0.
- amp0=spiBg[0,0]
+ print "No primary beam expression in model and no --primary-beam option given, cannot convert between apparent and intrinsic."
+ sys.exit(1)
+ if pb:
+ fitsBeam = False
+ if pb.lower().endswith('.fits'): # if pb is a FITS file, load interpolator
+ fitsBeam = True
+
+ # Following code is nicked from Cattery/Siamese/OMS/pybeams_fits.py
+ CORRS_XY = "xx", "xy", "yx", "yy"
+ CORRS_RL = "rr", "rl", "lr", "ll"
+ REIM = "re", "im"
+ REALIMAG = dict(re="real", im="imag")
+
+ # get the Cattery
+ for varname in 'CATTERY_PATH', "MEQTREES_CATTERY_PATH":
+ if varname in os.environ:
+ sys.path.append(os.environ[varname])
+
+ import Siamese.OMS.Utils as Utils
+
+
+ def make_beam_filename(filename_pattern, corr, reim):
+ """Makes beam filename for the given correlation and real/imaginary component (one of "re" or "im")"""
+ return Utils.substitute_pattern(filename_pattern,
+ corr=corr.lower(), xy=corr.lower(), CORR=corr.upper(), XY=corr.upper(),
+ reim=reim.lower(), REIM=reim.upper(), ReIm=reim.title(),
+ realimag=REALIMAG[reim].lower(), REALIMAG=REALIMAG[reim].upper(),
+ RealImag=REALIMAG[reim].title())
+
+
+ """Makes beam interpolator node for the given filename pattern."""
+ filename_real = []
+ filename_imag = []
+ # load beam interpolator
+ import Siamese.OMS.InterpolatedBeams as InterpolatedBeams
+
+ vbs = []
+ for icorr, corr in enumerate(CORRS_XY if options.linear_pol else CORRS_RL):
+ if icorr in (1, 2):
+ print ' omitting %s beam due to --beam-diag' % corr
+ vbs.append(0)
else:
- out=scipy.optimize.leastsq(errfunc,pinit,args=(logfreq,logbg))
- spi=out[0][1]
- amp0=10.**out[0][0]
+ # make FITS images or nulls for real and imaginary part
+ filenames = [make_beam_filename(pb, corr, 're'), make_beam_filename(pb, corr, 'im')]
+ print 'Loading FITS Beams', filenames[0], filenames[1]
+ vb = InterpolatedBeams.LMVoltageBeam(verbose=(options.verbose or 0) - 2, l_axis=options.fits_l_axis,
+ m_axis=options.fits_m_axis)
+ vb.read(*filenames)
+ vbs.append(vb)
+
+ model.setPrimaryBeam(vbs)
+ # get frequency
+ # fq = model.refFreq() or 1.4e+9
+ beamRefFreq = (options.beam_freq or 0) * 1e+6 or model.refFreq() or 1424500000.12
+ print "Using FITS beams with reference frequency %f MHz" % (beamRefFreq * 1e-6)
+
+        else:  # else, assume pb is an expression
+ try:
+ pbexp = eval('lambda r,fq:' + pb)
+                dum = pbexp(0, 1e+9)  # evaluate at r=0 and 1 GHz as a test
+ if not isinstance(dum, float):
+ raise TypeError, "does not evaluate to a float"
+ except Exception, exc:
+ print "Bad primary beam expression '%s': %s" % (pb, str(exc))
+ sys.exit(1)
+ model.setPrimaryBeam(pb)
+ # get frequency
+ # fq = model.refFreq() or 1.4e+9
+ fq = (options.beam_freq or 0) * 1e+6 or model.refFreq() or 1424500000.12
+ print "Using beam expression '%s' with reference frequency %f MHz" % (pb, fq * 1e-6)
+
+ nsrc = 0
+ # ensure that every source has an 'r' attribute
+ if not options.refresh_r:
+ for src in sources:
+ if not hasattr(src, 'r'):
+ src.setAttribute('r',
+ Coordinates.angular_dist_pos_angle(src.pos.ra, src.pos.dec, *model.fieldCenter())[
+ 0])
+ # evaluate sources
+ if not (options.app_to_int or options.int_to_app):
+ for src in sources:
+ r = getattr(src, 'r', None)
+ if r is not None:
+ bg = pbexp(r, fq)
+ src.setAttribute('beamgain', bg)
+ src.setAttribute('Iapp', src.flux.I * bg)
+ nsrc += 1
+ print "Applied primary beam expression to %d model sources" % nsrc
+ else:
+ # precompute PAs if fitsBeams are used
+ if fitsBeam:
+ if options.pa_from_ms is not None:
+ ms_strings = options.pa_from_ms.split(",")
+ ms_field = []
+ if len(ms_strings) > 1:
+ for ms_string in ms_strings:
+ match = re.match("^(.*?)(:[0-9]+)?$", ms_string)
+ if match:
+ msname, field = match.group(1), int(match.group(2)[1:]) if match.group(2) else 0
+ else:
+ msname, field = options.pa_from_ms, 0
+ ms_field.append((msname, field))
+ else:
+ ms_string = ms_strings[0]
+ match = re.match("^(.*?)(:[0-9]+)?$", ms_string)
+ if match:
+ msname, field = match.group(1), int(match.group(2)[1:]) if match.group(2) else 0
+ if os.path.exists(msname + "/SUBMSS"):
+ ms_field = [(ms, field) for ms in glob.glob(msname + "/SUBMSS/*") if os.path.isdir(ms)]
+ else:
+ ms_field = [[msname, 0]]
+ from pyrap.tables import table
+
+ dm, dq = pyrap_dmdq()
+ pas = []
+ zenith = dm.direction('AZEL', '0deg', '90deg')
+ for ms, field in ms_field:
+ print "Getting PA range from MS %s, field %d" % (ms, field)
+ tab = table(ms)
+ antpos = table(tab.getkeyword("ANTENNA")).getcol("POSITION")
+ ra, dec = table(tab.getkeyword("FIELD")).getcol("PHASE_DIR", field, 1)[0][0]
+ # make position measure from antenna 0
+ pos0 = dm.position('itrf', *[dq.quantity(x, 'm') for x in antpos[0]])
+ dm.do_frame(pos0)
+ # make direction measure from field centre
+ fld = dm.direction('J2000', dq.quantity(ra, "rad"), dq.quantity(dec, "rad"))
+ tab = tab.query("FIELD_ID==%d" % field)
+ # get unique times
+ times = numpy.array(sorted(set(tab.getcol("TIME")[~tab.getcol("FLAG_ROW")])))
+ pa1 = [(dm.do_frame(dm.epoch("UTC", dq.quantity(t, "s"))) and dm.posangle(fld,
+ zenith).get_value(
+ "rad")) for t in times]
+ pas += pa1
+ pa1 = numpy.array(pa1) / DEG
+ if options.enable_plots:
+ import pylab
+
+ pylab.plot((times - times[0]) / 3600, pa1)
+ pylab.xlabel("Time since beginning of observation, hours")
+ pylab.ylabel("PA, degrees")
+ pylab.savefig(os.path.basename(ms) + ".parangle.png")
+ print "Saved plot " + os.path.basename(ms) + ".parangle.png"
+ print "MS %s, PA range is %fdeg to %fdeg" % (ms, pa1[0], pa1[-1])
+ # get lm's rotated through those ranges
+ pa_range = numpy.array(pas)
+ elif options.pa_range is not None:
+ try:
+ ang0, ang1 = map(float, options.pa_range.split(",", 1))
+ except:
+ parser.error("Incorrect --pa-range option. FROM,TO values expected.")
+ pa_range = numpy.arange(ang0, ang1 + 1, 1) * DEG
+ elif options.pa is not None:
+ pa_range = options.pa * DEG
+ else:
+ pa_range = None
+ if options.verbose:
+ print "PA (deg):", " ".join(["%f" % (x / DEG) for x in pa_range]) if numpy.iterable(
+ pa_range) else pa_range
+ if options.enable_plots:
+ import pylab
+ pylab.figure()
+ for src in sources:
+ r = getattr(src, 'r', None)
+ if r is not None:
+ if fitsBeam:
+ # this is where the interpolator is called to determine the beam gain
+ # AIPS Memo 27 Sin Projection
+ ra0, dec0 = model.fieldCenter()
+ # ra0 = sources[0].pos.ra
+ # dec0 = sources[0].pos.dec
+ l = math.cos(src.pos.dec) * math.sin(src.pos.ra - ra0)
+ m = math.sin(src.pos.dec) * math.cos(dec0) - math.cos(src.pos.dec) * math.sin(dec0) * math.cos(
+ src.pos.ra - ra0)
- #look for Spectral Index in spi attribute
- #if no spectrum: add a SpectralIndex class to the source
- #else: add spectral index from PB to SI (int-to-app), subtract (app-to-int)
- if src_spectrum is None:
- setattr(src,'spectrum',Tigger.Models.ModelClasses.SpectralIndex(spi,beamRefFreq))
+ # rotate through (range of) PA value(s), if such option is supplied above
+ if pa_range is not None:
+ l, m = rotatelm(l, m, pa_range)
+
+ Jones2Mueller = Jones2Mueller_linear if options.linear_pol else Jones2Mueller_circular
+
+ jones = [vb.interpolate(l, m, freq=beamRefFreq) if vb else numpy.array(0) for vb in vbs]
+ # incorrect old-style Jones averaging
+ if options.beam_average_jones:
+ a, b, c, d = [j.mean() for j in jones]
+ mueller = Jones2Mueller(numpy.matrix([[a, b], [c, d]]))
+ if options.verbose > 1:
+ print "%s: jones11 mean %f std %f" % (src.name, abs(a), abs(jones[0]).std())
+ print "%s: jones22 mean %f std %f" % (src.name, abs(d), abs(jones[3]).std())
+ if options.enable_plots:
+ pylab.plot(abs(jones[0]), label="|J11| " + src.name)
+ # new-style averaging of Mueller matrix
+ else:
+ muellers = [Jones2Mueller(numpy.matrix([[a, b], [c, d]])) for a, b, c, d in
+ numpy.broadcast(*jones)]
+ mueller = sum(muellers) / len(muellers)
+ if options.enable_plots:
+ pylab.plot([m[0, 0] for m in muellers], label='M11 ' + src.name)
+ if options.verbose > 1:
+ print "%s: jones11 mean %f std %f" % (
+ src.name, abs(jones[0].mean()), abs(jones[0]).std())
+ print "%s: jones22 mean %f std %f" % (
+ src.name, abs(jones[3].mean()), abs(jones[3]).std())
+ print "%s: mueller11 mean %f std %f" % (
+ src.name, mueller[0, 0], numpy.std([m[0, 0] for m in muellers]))
+ bg = mueller[0, 0]
+ ## OMS 6/7/2015: let's do full inversion now to correct all four polarizations
+ if options.app_to_int:
+ if options.beam_nopol:
+ mueller = 1 / bg
+ else:
+ mueller = numpy.linalg.inv(mueller)
+ else:
+ if options.beam_nopol:
+ mueller = bg
+ ## #for now, ignore full Stokes and just use Stokes' I
+ # src.setAttribute('beamgain',bg)
+ nobeam = (bg < options.beam_clip)
+ spi = freqgrid = spiBg = None
+ # if no beam gain at this position, set appropriate tag
+ if nobeam:
+ src.setAttribute('nobeam', True)
+ src.setAttribute('Iapp', src.flux.I)
+ else:
+ src.removeAttribute('nobeam')
+ src.setAttribute('beamgain', bg)
+ iquv0 = numpy.matrix([[getattr(src.flux, stokes, 0.)] for stokes in "IQUV"])
+ iquv = mueller * iquv0
+ if options.verbose > 1:
+ print "%s: from %s to %s" % (src.name, iquv0.T, iquv.T)
+ if options.app_to_int and hasattr(src.flux, "I"):
+ src.setAttribute("Iapp", src.flux.I)
+ for i, stokes in enumerate("IQUV"):
+ if hasattr(src.flux, stokes):
+ setattr(src.flux, stokes, iquv[i, 0])
+ # add spectral index of position in the beam
+ src_spectrum = getattr(src, 'spectrum', None)
+ if options.beam_spi and (src_spectrum or options.force_beam_spi_wo_spectrum):
+ # determine spectral index by determining bg across the freqs (using only Stokes' I)
+ import scipy.optimize
+
+ bw = options.beam_spi * 1e+6 / 2
+ # make a frequency grid of 10 points across the band
+ # freqgrid = numpy.arange(beamRefFreq-bw,beamRefFreq+bw,bw/5)
+ freqgrid = numpy.arange(beamRefFreq - bw, beamRefFreq + bw * 1.01, bw / 5)
+ gxx = vbs[0].interpolate(l, m, freq=freqgrid, freqaxis=2)
+ gyy = vbs[3].interpolate(l, m, freq=freqgrid, freqaxis=2)
+ spiBg = (gxx * gxx.conj() + gyy * gyy.conj()).real
+ spiBg = spiBg[:, 0, :]
+ # power law fit
+ logbg1 = numpy.log10(spiBg)
+ logbg = numpy.log10(spiBg.mean(axis=0))
+ logfreq = numpy.log10(freqgrid)
+ fitfunc = lambda p, x: p[0] + p[1] * x
+ errfunc = lambda p, x, y: (y - fitfunc(p, x))
+ pinit = [10 ** logbg[0], 0.]
+ if numpy.isinf(logbg).sum() > 0:
+ spi = 0.
+ amp0 = spiBg[0, 0]
+ else:
+ out = scipy.optimize.leastsq(errfunc, pinit, args=(logfreq, logbg))
+ spi = out[0][1]
+ amp0 = 10. ** out[0][0]
+
+ # look for Spectral Index in spi attribute
+ # if no spectrum: add a SpectralIndex class to the source
+ # else: add spectral index from PB to SI (int-to-app), subtract (app-to-int)
+ if src_spectrum is None:
+ setattr(src, 'spectrum', Tigger.Models.ModelClasses.SpectralIndex(spi, beamRefFreq))
+ else:
+ ispiVal = getattr(src_spectrum, 'spi', None)
+ setattr(src, 'spectrum', Tigger.Models.ModelClasses.SpectralIndex(ispiVal - spi,
+ beamRefFreq) if options.app_to_int else Tigger.Models.ModelClasses.SpectralIndex(
+ ispiVal + spi, beamRefFreq))
+
+ if options.verbose:
+ print ("%s: beamgain" % src.name), bg, "spi", spi, "clipped" if nobeam else ""
+ # if spiBg is not None:
+ # print src.name,repr(freqgrid),repr(spiBg.mean(0))
+
+ else:
+ bg = pbexp(r, fq)
+ src.setAttribute('beamgain', bg)
+ if hasattr(src.flux, 'I'):
+ src.setAttribute('Iapp', src.flux.I if options.app_to_int else src.flux.I * bg)
+ for stokes in "IQUV":
+ x = getattr(src.flux, stokes, None)
+ if x is not None:
+ setattr(src.flux, stokes, x / bg if options.app_to_int else x * bg)
+ nsrc += 1
+ if options.enable_plots:
+ pylab.legend()
+ pylab.savefig("beamgains.png")
+ print "Saved plot beamgains.png"
+ print "Converted between apparent/intrinsic flux for %d model sources" % nsrc
+ if len(model.sources) != nsrc:
+ print " (%d sources were skipped for whatever reason, probably they didn't have an 'r' attribute)" % (
+ len(model.sources) - nsrc)
+
+ # rename using COPART
+ if options.rename:
+ print "Renaming sources using the COPART convention"
+ typecodes = dict(Gau="G", FITS="F")
+ # sort sources by decreasing flux
+ sources = sorted(sources, lambda a, b: cmp(b.brightness(), a.brightness()))
+ projection = Coordinates.Projection.SinWCS(*model.fieldCenter())
+ # work out source clusters
+ l = numpy.zeros(len(sources), float)
+ m = numpy.zeros(len(sources), float)
+ for i, src in enumerate(sources):
+ l[i], m[i] = projection.lm(src.pos.ra, src.pos.dec)
+ if options.cluster_dist:
+ # now, convert to dist[i,j]: distance between sources i and j
+ dist = numpy.sqrt(
+ (l[:, numpy.newaxis] - l[numpy.newaxis, :]) ** 2 + (m[:, numpy.newaxis] - m[numpy.newaxis, :]) ** 2)
+ # cluster[i] is (N,R), where N is cluster number for source #i, and R is rank of that source in the cluster
+ # place source 0 into cluster 0,#0
+ cluster = [(0, 0)]
+ clustersize = [1]
+ clusterflux = [sources[0].brightness()]
+ dist0 = options.cluster_dist * DEG / 3600
+ for i in range(1, len(sources)):
+ src = sources[i]
+ # find closest brighter source, and assign to its cluster if close enough
+ imin = dist[i, :i].argmin()
+ if dist[i, imin] <= dist0:
+ iclust, rank = cluster[imin]
+ cluster.append((iclust, clustersize[iclust]))
+ clustersize[iclust] += 1
+ clusterflux[iclust] += src.brightness()
+ # else start new cluster from source
else:
- ispiVal=getattr(src_spectrum,'spi',None)
- setattr(src,'spectrum',Tigger.Models.ModelClasses.SpectralIndex(ispiVal-spi,beamRefFreq) if options.app_to_int else Tigger.Models.ModelClasses.SpectralIndex(ispiVal+spi,beamRefFreq));
-
- if options.verbose:
- print ("%s: beamgain"%src.name),bg,"spi",spi,"clipped" if nobeam else "";
- # if spiBg is not None:
- # print src.name,repr(freqgrid),repr(spiBg.mean(0));
-
- else:
- bg = pbexp(r,fq);
- src.setAttribute('beamgain',bg);
- if hasattr(src.flux,'I'):
- src.setAttribute('Iapp',src.flux.I if options.app_to_int else src.flux.I*bg);
- for stokes in "IQUV":
- x = getattr(src.flux,stokes,None);
- if x is not None:
- setattr(src.flux,stokes,x/bg if options.app_to_int else x*bg);
- nsrc += 1;
- if options.enable_plots:
- pylab.legend()
- pylab.savefig("beamgains.png")
- print "Saved plot beamgains.png"
- print "Converted between apparent/intrinsic flux for %d model sources"%nsrc;
- if len(model.sources) != nsrc:
- print " (%d sources were skipped for whatever reason, probably they didn't have an 'r' attribute)"%(len(model.sources)-nsrc);
-
-
- # rename using COPART
- if options.rename:
- print "Renaming sources using the COPART convention"
- typecodes = dict(Gau="G",FITS="F");
- # sort sources by decreasing flux
- sources = sorted(sources,lambda a,b:cmp(b.brightness(),a.brightness()));
- projection = Coordinates.Projection.SinWCS(*model.fieldCenter());
- # work out source clusters
- l = numpy.zeros(len(sources),float);
- m = numpy.zeros(len(sources),float);
- for i,src in enumerate(sources):
- l[i],m[i] = projection.lm(src.pos.ra,src.pos.dec);
- if options.cluster_dist:
- # now, convert to dist[i,j]: distance between sources i and j
- dist = numpy.sqrt((l[:,numpy.newaxis]-l[numpy.newaxis,:])**2 + (m[:,numpy.newaxis]-m[numpy.newaxis,:])**2);
- # cluster[i] is (N,R), where N is cluster number for source #i, and R is rank of that source in the cluster
- # place source 0 into cluster 0,#0
- cluster = [ (0,0) ];
- clustersize = [1];
- clusterflux = [ sources[0].brightness() ];
- dist0 = options.cluster_dist*DEG/3600;
- for i in range(1,len(sources)):
- src = sources[i];
- # find closest brighter source, and assign to its cluster if close enough
- imin = dist[i,:i].argmin();
- if dist[i,imin] <= dist0:
- iclust,rank = cluster[imin];
- cluster.append((iclust,clustersize[iclust]));
- clustersize[iclust] += 1;
- clusterflux[iclust] += src.brightness();
- # else start new cluster from source
+ cluster.append((len(clustersize), 0))
+ clustersize.append(1)
+ clusterflux.append(src.brightness())
else:
- cluster.append((len(clustersize),0));
- clustersize.append(1);
- clusterflux.append(src.brightness());
+ cluster = [(i, 0) for i, src in enumerate(sources)]
+ # now go over and rename the sources
+ # make array of source names
+ chars = [chr(x) for x in range(ord('a'), ord('z') + 1)]
+ names = morenames = list(chars)
+ while len(names) < len(sources):
+ morenames = [ch + name for ch in chars for name in morenames]
+ names += morenames
+ # make a second version where the single-char names are capitalized
+ Names = list(names)
+ Names[:26] = [n.upper() for n in chars]
+ # now go over and rename the sources
+ clustername = {}
+ for i, src in enumerate(sources):
+ iclust, rank = cluster[i]
+            # look up name of cluster based on rank-0 source
+ if not rank:
+ # lookup radius, in units of arcmin
+ rad_min = math.sqrt(l[i] ** 2 + m[i] ** 2) * (60 / DEG)
+ # divide by radial step
+ rad = min(int(rad_min / options.radial_step), 10)
+ radchr = '0123456789x'[rad]
+ if rad_min > options.radial_step * 0.01:
+ # convert p.a. to tens of degrees
+ pa = math.atan2(l[i], m[i])
+ if pa < 0:
+ pa += math.pi * 2
+ pa = round(pa / (DEG * 10)) % 36
+ # make clustername
+ clusname = clustername[iclust] = "%s%02d%s" % (Names[iclust], pa, radchr)
+ else:
+ clusname = clustername[iclust] = "%s0" % (Names[iclust])
+ src.name = "%s%s" % (clusname, typecodes.get(src.typecode, ''))
+ if options.cluster_dist:
+ src.setAttribute('cluster_lead', True)
+ else:
+ clusname = clustername[iclust]
+ src.name = "%s%s%s" % (clusname, names[rank - 1], typecodes.get(src.typecode, ''))
+ if options.cluster_dist:
+ src.setAttribute('cluster', clusname)
+ src.setAttribute('cluster_size', clustersize[iclust])
+ src.setAttribute('cluster_flux', clusterflux[iclust])
+ # check for duplicate names (if renaming, duplicate names cannot happen anyway, unless the naming algorithm above is broken)
else:
- cluster = [ (i,0) for i,src in enumerate(sources) ];
- # now go over and rename the sources
- # make array of source names
- chars = [ chr(x) for x in range(ord('a'),ord('z')+1) ];
- names = morenames = list(chars);
- while len(names) < len(sources):
- morenames = [ ch+name for ch in chars for name in morenames ];
- names += morenames;
- # make a second version where the single-char names are capitalized
- Names = list(names);
- Names[:26] = [ n.upper() for n in chars ];
- # now go over and rename the sources
- clustername = {};
- for i,src in enumerate(sources):
- iclust,rank = cluster[i];
- # for up name of cluster based on rank-0 source
- if not rank:
- # lookup radius, in units of arcmin
- rad_min = math.sqrt(l[i]**2+m[i]**2)*(60/DEG);
- # divide by radial step
- rad = min(int(rad_min/options.radial_step),10);
- radchr = '0123456789x'[rad];
- if rad_min > options.radial_step*0.01:
- # convert p.a. to tens of degrees
- pa = math.atan2(l[i],m[i]);
- if pa < 0:
- pa += math.pi*2;
- pa = round(pa/(DEG*10))%36;
- # make clustername
- clusname = clustername[iclust] = "%s%02d%s"%(Names[iclust],pa,radchr);
- else:
- clusname = clustername[iclust] = "%s0"%(Names[iclust]);
- src.name = "%s%s"%(clusname,typecodes.get(src.typecode,''));
- if options.cluster_dist:
- src.setAttribute('cluster_lead',True);
- else:
- clusname = clustername[iclust];
- src.name = "%s%s%s"%(clusname,names[rank-1],typecodes.get(src.typecode,''));
- if options.cluster_dist:
- src.setAttribute('cluster',clusname);
- src.setAttribute('cluster_size',clustersize[iclust]);
- src.setAttribute('cluster_flux',clusterflux[iclust]);
- # check for duplicate names (if renaming, duplicate names cannot happen anyway, unless the naming algorithm above is broken)
- else:
- names = dict();
- sources0 = sources;
- sources = [];
- for i,src in enumerate(sources0):
- if src.name in names:
- print "Duplicate source '%s' at #%d (first found at #%d), removing"%(src.name,i,names[src.name]);
- else:
- names[src.name] = i;
- sources.append(src);
- # assign prefix to source names
- if options.prefix:
- print "Prefixing source names with '%s'"%options.prefix;
- for src in sources:
- src.name = options.prefix + src.name;
- # merge clusters
- if options.merge_clusters:
- tags = set(options.merge_clusters.split(',')) if options.merge_clusters != "ALL" else None;
- # build up dict of clusters
- clusters = dict();
- for src in sources:
- clusname = getattr(src,'cluster','');
- clusters.setdefault(clusname,{})[src.name] = src;
- # unclustered sources copied over as-is
- new_sources = clusters.pop('',{}).values();
- # next, deal with each cluster
- for clusname,srcdict in clusters.iteritems():
- # leading source has the same name as the cluster
- src0 = srcdict.get(clusname);
- # if no leading source, or leading source not tagged, or length 1, then copy cluster as-is
- if not src0 or len(srcdict)<2 or (tags is not None and
- not any([getattr(src0,tag,None) for tag in tags]) ):
- new_sources += srcdict.values();
- else:
- # sum fluxes
- for x in 'IQUV':
- if hasattr(src0.flux,x):
- setattr(src0.flux,x,sum([getattr(s.flux,x,0) for s in srcdict.itervalues()]));
- if hasattr(src0,'Iapp'):
- src0.Iapp = sum([getattr(s,'Iapp',0) for s in srcdict.itervalues()]);
- new_sources.append(src0);
- print "Merged cluster %s (%d sources)"%(src0.name,len(srcdict));
- sources = new_sources;
- model.setSources(sources);
- # save output
- print "Saving model containing %d sources to %s (%s)"%(len(sources),output,output_doc);
- export_func(model,output,sources=sources,format=options.output_format or None);
+ names = dict()
+ sources0 = sources
+ sources = []
+ for i, src in enumerate(sources0):
+ if src.name in names:
+ print "Duplicate source '%s' at #%d (first found at #%d), removing" % (src.name, i, names[src.name])
+ else:
+ names[src.name] = i
+ sources.append(src)
+ # assign prefix to source names
+ if options.prefix:
+ print "Prefixing source names with '%s'" % options.prefix
+ for src in sources:
+ src.name = options.prefix + src.name
+ # merge clusters
+ if options.merge_clusters:
+ tags = set(options.merge_clusters.split(',')) if options.merge_clusters != "ALL" else None
+ # build up dict of clusters
+ clusters = dict()
+ for src in sources:
+ clusname = getattr(src, 'cluster', '')
+ clusters.setdefault(clusname, {})[src.name] = src
+ # unclustered sources copied over as-is
+ new_sources = clusters.pop('', {}).values()
+ # next, deal with each cluster
+ for clusname, srcdict in clusters.iteritems():
+ # leading source has the same name as the cluster
+ src0 = srcdict.get(clusname)
+ # if no leading source, or leading source not tagged, or length 1, then copy cluster as-is
+ if not src0 or len(srcdict) < 2 or (tags is not None and
+ not any([getattr(src0, tag, None) for tag in tags])):
+ new_sources += srcdict.values()
+ else:
+ # sum fluxes
+ for x in 'IQUV':
+ if hasattr(src0.flux, x):
+ setattr(src0.flux, x, sum([getattr(s.flux, x, 0) for s in srcdict.itervalues()]))
+ if hasattr(src0, 'Iapp'):
+ src0.Iapp = sum([getattr(s, 'Iapp', 0) for s in srcdict.itervalues()])
+ new_sources.append(src0)
+ print "Merged cluster %s (%d sources)" % (src0.name, len(srcdict))
+ sources = new_sources
+ model.setSources(sources)
+ # save output
+ print "Saving model containing %d sources to %s (%s)" % (len(sources), output, output_doc)
+ export_func(model, output, sources=sources, format=options.output_format or None)
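
The apparent/intrinsic conversion reformatted above rests on two small pieces of math: the SIN-projection direction cosines (l, m) of a source relative to the field centre (per the AIPS Memo 27 comment), and a log-log power-law fit of the beam gain over a narrow frequency grid to estimate the beam-induced spectral index. A minimal standalone sketch of both follows, assuming plain numpy/scipy and a 1-D array of gains in place of the LMVoltageBeam interpolators; the helper names are illustrative and not part of the script:

    import math
    import numpy as np
    from scipy.optimize import leastsq

    def lm_sin(ra, dec, ra0, dec0):
        """Direction cosines of (ra, dec) w.r.t. field centre (ra0, dec0), SIN projection (radians)."""
        l = math.cos(dec) * math.sin(ra - ra0)
        m = math.sin(dec) * math.cos(dec0) - math.cos(dec) * math.sin(dec0) * math.cos(ra - ra0)
        return l, m

    def fit_beam_spi(gains, freqs):
        """Fit gains ~ amp0 * freq**spi in log-log space; returns (amp0, spi).
        Falls back to a flat spectrum if the gain is zero anywhere on the grid."""
        logg = np.log10(gains)
        logf = np.log10(freqs)
        if np.isinf(logg).any():
            return gains[0], 0.0
        errfunc = lambda p, x, y: y - (p[0] + p[1] * x)   # residuals of a straight-line model
        (logamp, spi), _ = leastsq(errfunc, [logg[0], 0.0], args=(logf, logg))
        return 10.0 ** logamp, spi

Here lm_sin reproduces the per-source l, m expressions used above, and fit_beam_spi follows the same leastsq fit; in this sketch the intercept is seeded with log10 of the first gain sample.
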
diff --git a/Tigger/bin/tigger-make-brick b/Tigger/bin/tigger-make-brick
index 7cbdffd..df61912 100755
--- a/Tigger/bin/tigger-make-brick
+++ b/Tigger/bin/tigger-make-brick
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,210 +26,214 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+import math
import sys
+from math import cos
+
import os.path
+from astLib.astWCS import WCS
from astropy.io import fits as pyfits
+
import Tigger
-import math
-from math import cos
-from astLib.astWCS import WCS
-DEG = math.pi/180;
+DEG = math.pi / 180
-NATIVE = "Tigger";
+NATIVE = "Tigger"
if __name__ == '__main__':
- import Kittens.utils
- _verbosity = Kittens.utils.verbosity(name="convert-model");
- dprint = _verbosity.dprint;
- dprintf = _verbosity.dprintf;
-
- Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
-
- from Tigger.Tools import Imaging
- from Tigger.Models import SkyModel,ModelClasses
-
- # setup some standard command-line option parsing
- #
- from optparse import OptionParser
- parser = OptionParser(usage="""%prog: sky_model output_image [output_model]""",
- description="""Converts sources in a sky model into a brick (FITS image.)
+ import Kittens.utils
+
+ _verbosity = Kittens.utils.verbosity(name="convert-model")
+ dprint = _verbosity.dprint
+ dprintf = _verbosity.dprintf
+
+    Tigger.nuke_matplotlib()  # don't let the door hit you in the ass, sucka
+
+ from Tigger.Tools import Imaging
+ from Tigger.Models import SkyModel, ModelClasses
+
+ # setup some standard command-line option parsing
+ #
+ from optparse import OptionParser
+
+ parser = OptionParser(usage="""%prog: sky_model output_image [output_model]""",
+ description="""Converts sources in a sky model into a brick (FITS image.)
Input 'sky_model' should be a Tigger-format sky model.
The 'output_image' should already exist. (Use lwimager or something similar to make a sky image.)
If an 'output_model' is specified, then sources converted into the brick will be removed from the model,
-while the brick itself will be added (as a FITS image component), and a new sky model will be written out.""");
- parser.add_option("-f","--force",action="store_true",
- help="Forces overwrite of output model.");
- parser.add_option("-s","--subset",type="string",
- help="Selects subset of sources. Use a comma- (or space) separated list of selection tokens. A token can be "
- "a source name, or [N]:[M] to select sources in order of brightness from N up to and not including M, or =tag to select sources "
- "with the specified tag. Prefix with ! or - to negate a selection token.");
- parser.add_option("-F","--freq",type="float",metavar="MHz",
- help="Sets the frequency at which an image will be generated. This affects sources with a spectral index or an RM. Default is to use "
- "the reference frequency of the model.");
- parser.add_option("-b","--primary-beam",type="string",metavar="EXPR",
- help="Apply a primary (power) beam expression to source fluxes. Any valid Python expression using the variables 'r' and 'fq' is accepted. "
- "Example (for the WSRT-like 25m dish PB): \"cos(min(65*fq*1e-9*r,1.0881))**6\". NB: this particular expression can be simply specified as --primary-beam wsrt. "
- "Also available is a slightly different --primary-beam newstar");
- parser.add_option("-p","--padding",type="float",metavar="PAD",
- help="Sets the pad factor attribute of the resulting FITS image component. Default is %default.");
- parser.add_option("-x","--x-offset",type="float",metavar="FRACPIX",
- help="Offsets the FITS image by this many pixels in the X direction.");
- parser.add_option("-y","--y-offset",type="float",metavar="FRACPIX",
- help="Offsets the FITS image by this many pixels in the Y direction.");
- parser.add_option("-N","--source-name",type="string",metavar="NAME",
- help="Name for source component corresponding to image. Default is to use the basename of the FITS file");
- parser.add_option("--add-to-image",action="store_true",
- help="Adds sources to contents of FITS image. Default is to overwrite image data.");
- parser.add_option("--keep-sources",action="store_true",
- help="Keeps sources in the sky model. Default is to remove sources that have been put into the brick.");
- parser.add_option("-d", "--debug",dest="verbose",type="string",action="append",metavar="Context=Level",
- help="(for debugging Python code) sets verbosity level of the named Python context. May be used multiple times.");
-
- parser.set_defaults(freq=None,padding=1,x_offset=0,y_offset=0,subset="all");
-
- (options,rem_args) = parser.parse_args();
-
- # get filenames
- if len(rem_args) == 2:
- skymodel,fitsfile = rem_args;
- output_model = None;
- elif len(rem_args) == 3:
- skymodel,fitsfile,output_model = rem_args;
- else:
- parser.error("Incorrect number of arguments. Use -h for help.");
-
- # check if we need to overwrite
- if output_model and os.path.exists(output_model) and not options.force:
- print "Output file %s already exists. Use the -f switch to overwrite."%output_model;
- sys.exit(1);
-
- # load model, apply selection
- model = Tigger.load(skymodel);
- print "Loaded model",skymodel;
- # apply selection
- sources0 = model.getSourceSubset(options.subset);
- # make sure only point sources are left
- sources = [ src for src in sources0 if src.typecode == "pnt" ];
- print "Selection leaves %d source(s), of which %d are point source(s)"%(len(sources0),len(sources));
-
- if not sources:
- print "There's nothing to convert into a brick.";
- sys.exit(1);
-
- # get PB expression
- pbfunc = None;
- if options.primary_beam:
- if options.primary_beam.upper() == "WSRT":
- pbfunc = lambda r,fq:cos(min(65*fq*1e-9*r,1.0881))**6;
- print "Primary beam expression is standard WSRT cos^6: 'cos(min(65*fq*1e-9*r,1.0881))**6'";
- elif options.primary_beam.upper() == "NEWSTAR":
- pbfunc = lambda r,fq:max(cos(65*1e-9*fq*r)**6,.01);
- print "Primary beam expression is standard NEWSTAR cos^6: 'max(cos(65*1e-9*fq*r)**6,.01)'";
+while the brick itself will be added (as a FITS image component), and a new sky model will be written out.""")
+ parser.add_option("-f", "--force", action="store_true",
+ help="Forces overwrite of output model.")
+ parser.add_option("-s", "--subset", type="string",
+ help="Selects subset of sources. Use a comma- (or space) separated list of selection tokens. A token can be "
+ "a source name, or [N]:[M] to select sources in order of brightness from N up to and not including M, or =tag to select sources "
+ "with the specified tag. Prefix with ! or - to negate a selection token.")
+ parser.add_option("-F", "--freq", type="float", metavar="MHz",
+ help="Sets the frequency at which an image will be generated. This affects sources with a spectral index or an RM. Default is to use "
+ "the reference frequency of the model.")
+ parser.add_option("-b", "--primary-beam", type="string", metavar="EXPR",
+ help="Apply a primary (power) beam expression to source fluxes. Any valid Python expression using the variables 'r' and 'fq' is accepted. "
+ "Example (for the WSRT-like 25m dish PB): \"cos(min(65*fq*1e-9*r,1.0881))**6\". NB: this particular expression can be simply specified as --primary-beam wsrt. "
+ "Also available is a slightly different --primary-beam newstar")
+ parser.add_option("-p", "--padding", type="float", metavar="PAD",
+ help="Sets the pad factor attribute of the resulting FITS image component. Default is %default.")
+ parser.add_option("-x", "--x-offset", type="float", metavar="FRACPIX",
+ help="Offsets the FITS image by this many pixels in the X direction.")
+ parser.add_option("-y", "--y-offset", type="float", metavar="FRACPIX",
+ help="Offsets the FITS image by this many pixels in the Y direction.")
+ parser.add_option("-N", "--source-name", type="string", metavar="NAME",
+ help="Name for source component corresponding to image. Default is to use the basename of the FITS file")
+ parser.add_option("--add-to-image", action="store_true",
+ help="Adds sources to contents of FITS image. Default is to overwrite image data.")
+ parser.add_option("--keep-sources", action="store_true",
+ help="Keeps sources in the sky model. Default is to remove sources that have been put into the brick.")
+ parser.add_option("-d", "--debug", dest="verbose", type="string", action="append", metavar="Context=Level",
+ help="(for debugging Python code) sets verbosity level of the named Python context. May be used multiple times.")
+
+ parser.set_defaults(freq=None, padding=1, x_offset=0, y_offset=0, subset="all")
+
+ (options, rem_args) = parser.parse_args()
+
+ # get filenames
+ if len(rem_args) == 2:
+ skymodel, fitsfile = rem_args
+ output_model = None
+ elif len(rem_args) == 3:
+ skymodel, fitsfile, output_model = rem_args
else:
- try:
- pbfunc = eval("lambda r,fq:"+options.primary_beam);
- except Exception,err:
- print "Error parsing primary beam expression %s: %s"%(options.primary_beam,str(err));
- sys.exit(1);
- print "Primary beam expression is ",options.primary_beam;
-
- # get frequency
- freq = (options.freq or model.refFreq() or 1400)*1e+6;
- print "Brick frequency is %f MHz"%(freq*1e-6);
-
- # read fits file
- try:
- input_hdu = pyfits.open(fitsfile)[0];
- hdr = input_hdu.header;
- except Exception,err:
- print "Error reading FITS file %s: %s"%(fitsfile,str(err));
- sys.exit(1);
- print "Using FITS file",fitsfile;
-
- # reset data if asked to
- if not options.add_to_image:
- input_hdu.data[...] = 0;
- print "Contents of FITS image will be reset";
- else:
- print "Adding source(s) to FITS image";
- # Parse header to figure out RA and DEC axes
- ra_axis = dec_axis = None;
- for iaxis in range(1,hdr['NAXIS']+1):
- name = hdr.get("CTYPE%d"%iaxis,'').upper();
- if name.startswith("RA"):
- ra_axis = iaxis;
- ra0pix = hdr["CRPIX%d"%iaxis]-1;
- elif name.startswith("DEC"):
- dec_axis = iaxis;
- dec0pix = hdr["CRPIX%d"%iaxis]-1;
- if ra_axis is None or dec_axis is None:
- print "Can't find RA and/or DEC axis in this FITS image";
- sys.exit(1);
-
- # make WCS from header
- wcs = WCS(hdr,mode='pyfits');
- ra0,dec0 = wcs.pix2wcs(ra0pix,dec0pix);
- print "Image reference pixel (%d,%d) is at %f,%f deg"%(ra0pix,dec0pix,ra0,dec0);
-
- # apply x/y pixel offset
- if options.x_offset or options.y_offset:
- ra0,dec0 = wcs.pix2wcs(ra0pix+options.x_offset,dec0pix+options.y_offset);
- print "Applying x/y offset moves this to %f,%f deg"%(ra0,dec0);
- hdr["CRVAL%d"%ra_axis] = ra0;
- hdr["CRVAL%d"%dec_axis] = dec0;
- wcs = WCS(hdr,mode='pyfits');
-
- # insert sources
- Imaging.restoreSources(input_hdu,sources,0,primary_beam=pbfunc,freq=freq);
- # save fits file
- try:
- input_hdu.writeto(fitsfile,clobber=True);
- except Exception,err:
- print "Error writing FITS file %s: %s"%(fitsfile,str(err));
- sys.exit(1);
- print "Added %d source(s) into FITS file %s"%(len(sources),fitsfile);
- print "Using pad factor",options.padding;
-
- # remove sources from model if asked to
- if not options.keep_sources:
- selected = set([src.name for src in sources]);
- sources = [ src for src in model.sources if not src.name in selected ];
- else:
- sources = model.sources;
-
- # add image to model
- if output_model:
- # get image parameters
- max_flux = float(input_hdu.data.max());
- ra0 *= DEG;
- dec0 *= DEG;
- sx,sy = wcs.getHalfSizeDeg();
- sx *= DEG;
- sy *= DEG;
- nx,ny = input_hdu.data.shape[-1:-3:-1];
- # check if this image is already contained in the model
- for src in model.sources:
- if isinstance(getattr(src,'shape',None),ModelClasses.FITSImage) and os.path.samefile(src.shape.filename,fitsfile):
- print "Model already contains a component (%s) for this image. Updating the component"%src.name;
- # update source parameters
- src.position.ra,src.position.dec = ra0,dec0;
- src.flux.I = max_flux;
- src.shape.ex,src.shape.ey = sx,sy;
- src.shape.nx,src.shape.ny = nx,ny;
- src.shape.pad = pad;
- break;
- # not contained, make new source object
+ parser.error("Incorrect number of arguments. Use -h for help.")
+
+ # check if we need to overwrite
+ if output_model and os.path.exists(output_model) and not options.force:
+ print "Output file %s already exists. Use the -f switch to overwrite." % output_model
+ sys.exit(1)
+
+ # load model, apply selection
+ model = Tigger.load(skymodel)
+ print "Loaded model", skymodel
+ # apply selection
+ sources0 = model.getSourceSubset(options.subset)
+ # make sure only point sources are left
+ sources = [src for src in sources0 if src.typecode == "pnt"]
+ print "Selection leaves %d source(s), of which %d are point source(s)" % (len(sources0), len(sources))
+
+ if not sources:
+ print "There's nothing to convert into a brick."
+ sys.exit(1)
+
+ # get PB expression
+ pbfunc = None
+ if options.primary_beam:
+ if options.primary_beam.upper() == "WSRT":
+ pbfunc = lambda r, fq: cos(min(65 * fq * 1e-9 * r, 1.0881)) ** 6
+ print "Primary beam expression is standard WSRT cos^6: 'cos(min(65*fq*1e-9*r,1.0881))**6'"
+ elif options.primary_beam.upper() == "NEWSTAR":
+ pbfunc = lambda r, fq: max(cos(65 * 1e-9 * fq * r) ** 6, .01)
+ print "Primary beam expression is standard NEWSTAR cos^6: 'max(cos(65*1e-9*fq*r)**6,.01)'"
+ else:
+ try:
+ pbfunc = eval("lambda r,fq:" + options.primary_beam)
+ except Exception, err:
+ print "Error parsing primary beam expression %s: %s" % (options.primary_beam, str(err))
+ sys.exit(1)
+ print "Primary beam expression is ", options.primary_beam
+
+ # get frequency
+ freq = (options.freq or model.refFreq() or 1400) * 1e+6
+ print "Brick frequency is %f MHz" % (freq * 1e-6)
+
+ # read fits file
+ try:
+ input_hdu = pyfits.open(fitsfile)[0]
+ hdr = input_hdu.header
+ except Exception, err:
+ print "Error reading FITS file %s: %s" % (fitsfile, str(err))
+ sys.exit(1)
+ print "Using FITS file", fitsfile
+
+ # reset data if asked to
+ if not options.add_to_image:
+ input_hdu.data[...] = 0
+ print "Contents of FITS image will be reset"
else:
- pos = ModelClasses.Position(ra0,dec0);
- flux = ModelClasses.Flux(max_flux);
- shape = ModelClasses.FITSImage(sx,sy,0,fitsfile,nx,ny,pad=options.padding);
- sname = options.source_name or os.path.splitext(os.path.basename(fitsfile))[0];
- img_src = SkyModel.Source(sname,pos,flux,shape=shape);
- print "Inserting new model component named %s"%sname;
- sources.append(img_src);
- # save model
- model.setSources(sources);
- model.save(output_model);
- print "Saved %d source(s) to output model %s."%(len(model.sources),output_model);
-
+ print "Adding source(s) to FITS image"
+ # Parse header to figure out RA and DEC axes
+ ra_axis = dec_axis = None
+ for iaxis in range(1, hdr['NAXIS'] + 1):
+ name = hdr.get("CTYPE%d" % iaxis, '').upper()
+ if name.startswith("RA"):
+ ra_axis = iaxis
+ ra0pix = hdr["CRPIX%d" % iaxis] - 1
+ elif name.startswith("DEC"):
+ dec_axis = iaxis
+ dec0pix = hdr["CRPIX%d" % iaxis] - 1
+ if ra_axis is None or dec_axis is None:
+ print "Can't find RA and/or DEC axis in this FITS image"
+ sys.exit(1)
+
+ # make WCS from header
+ wcs = WCS(hdr, mode='pyfits')
+ ra0, dec0 = wcs.pix2wcs(ra0pix, dec0pix)
+ print "Image reference pixel (%d,%d) is at %f,%f deg" % (ra0pix, dec0pix, ra0, dec0)
+
+ # apply x/y pixel offset
+ if options.x_offset or options.y_offset:
+ ra0, dec0 = wcs.pix2wcs(ra0pix + options.x_offset, dec0pix + options.y_offset)
+ print "Applying x/y offset moves this to %f,%f deg" % (ra0, dec0)
+ hdr["CRVAL%d" % ra_axis] = ra0
+ hdr["CRVAL%d" % dec_axis] = dec0
+ wcs = WCS(hdr, mode='pyfits')
+
+ # insert sources
+ Imaging.restoreSources(input_hdu, sources, 0, primary_beam=pbfunc, freq=freq)
+ # save fits file
+ try:
+ input_hdu.writeto(fitsfile, clobber=True)
+ except Exception, err:
+ print "Error writing FITS file %s: %s" % (fitsfile, str(err))
+ sys.exit(1)
+ print "Added %d source(s) into FITS file %s" % (len(sources), fitsfile)
+ print "Using pad factor", options.padding
+
+ # remove sources from model if asked to
+ if not options.keep_sources:
+ selected = set([src.name for src in sources])
+ sources = [src for src in model.sources if not src.name in selected]
+ else:
+ sources = model.sources
+
+ # add image to model
+ if output_model:
+ # get image parameters
+ max_flux = float(input_hdu.data.max())
+ ra0 *= DEG
+ dec0 *= DEG
+ sx, sy = wcs.getHalfSizeDeg()
+ sx *= DEG
+ sy *= DEG
+ nx, ny = input_hdu.data.shape[-1:-3:-1]
+ # check if this image is already contained in the model
+ for src in model.sources:
+ if isinstance(getattr(src, 'shape', None), ModelClasses.FITSImage) and os.path.samefile(src.shape.filename,
+ fitsfile):
+ print "Model already contains a component (%s) for this image. Updating the component" % src.name
+ # update source parameters
+                src.pos.ra, src.pos.dec = ra0, dec0
+ src.flux.I = max_flux
+ src.shape.ex, src.shape.ey = sx, sy
+ src.shape.nx, src.shape.ny = nx, ny
+                src.shape.pad = options.padding
+ break
+ # not contained, make new source object
+ else:
+ pos = ModelClasses.Position(ra0, dec0)
+ flux = ModelClasses.Flux(max_flux)
+ shape = ModelClasses.FITSImage(sx, sy, 0, fitsfile, nx, ny, pad=options.padding)
+ sname = options.source_name or os.path.splitext(os.path.basename(fitsfile))[0]
+ img_src = SkyModel.Source(sname, pos, flux, shape=shape)
+ print "Inserting new model component named %s" % sname
+ sources.append(img_src)
+ # save model
+ model.setSources(sources)
+ model.save(output_model)
+ print "Saved %d source(s) to output model %s." % (len(model.sources), output_model)
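
tigger-make-brick accepts the same --primary-beam forms as tigger-convert: the built-in WSRT and NEWSTAR cos^6 power beams quoted in the option help, or an arbitrary Python expression in r (distance from the field centre, in radians) and fq (frequency in Hz) that is eval'd into a lambda. A small sketch of that pattern is below; compile_pb is an illustrative helper name, not part of the scripts, and the probe at r=0, 1 GHz matches the sanity check done in tigger-convert:

    import math

    # Built-in power-beam shapes from the --primary-beam option help (r in radians, fq in Hz)
    wsrt_pb = lambda r, fq: math.cos(min(65 * fq * 1e-9 * r, 1.0881)) ** 6
    newstar_pb = lambda r, fq: max(math.cos(65 * 1e-9 * fq * r) ** 6, .01)

    def compile_pb(expr):
        """Compile a user-supplied 'r,fq' expression into a callable and sanity-check it."""
        ns = dict(vars(math))                      # expose cos(), sin(), ... to the expression
        fn = eval("lambda r, fq: " + expr, ns)     # only safe for trusted input, as in the scripts
        if not isinstance(fn(0, 1e+9), float):     # probe at r=0 and 1 GHz
            raise TypeError("primary beam expression does not evaluate to a float")
        return fn

    # e.g. compile_pb("cos(min(65*fq*1e-9*r,1.0881))**6")(0.01, 1.4e+9)  -> about 0.053
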
diff --git a/Tigger/bin/tigger-restore b/Tigger/bin/tigger-restore
index 1dd6cdb..1a37d6a 100755
--- a/Tigger/bin/tigger-restore
+++ b/Tigger/bin/tigger-restore
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,189 +26,189 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+import os
import sys
+
from astropy.io import fits as pyfits
-import re
-import os.path
-import os
-import math
if __name__ == '__main__':
- import Tigger.Models.Formats
- from Tigger.Models.Formats import ASCII
-
- AUTO = "auto";
- full_formats = Tigger.Models.Formats.listFormatsFull();
- input_formats = [ name for name,(load,save,doc,extensions) in full_formats if load ] + [ AUTO ];
-
- # setup some standard command-line option parsing
- #
- from optparse import OptionParser
- parser = OptionParser(usage="""%prog: [options] input_image sky_model [output_image]""",
- description="""Restores sources from sky model into an input image, writes result to output image. If
-an output image is not specified, makes a name for it automatically.""");
- parser.add_option("-t","--type",choices=input_formats,
- help="Input model type (%s). Default is %%default."%(", ".join(input_formats)));
- parser.add_option("--format",type="string",
- help="""Input format, for ASCII or BBS tables. For ASCII tables, default is "%s". For BBS tables, the default format is specified in the file header."""%ASCII.DefaultDMSFormatString);
- parser.add_option("-n","--num-sources",dest="nsrc",type="int",action="store",
- help="Only restore the NSRC brightest sources");
- parser.add_option("-s","--scale",dest="fluxscale",metavar="FLUXSCALE[,N]",action="store",
- help="rescale model fluxes by given factor. If N is given, rescale N brightest only.");
- parser.add_option("-b","--restoring-beam",type="string",metavar="BMAJ[,BMIN,PA]",
- help="specify restoring beam size, overriding BMAJ/BMIN/BPA keywords in input image. "+
- "Use a single value (arcsec) for circular beam, or else "+
- "supply major/minor size and position angle (deg).");
- parser.add_option("-p","--psf-file",dest="psf",action="store",
- help="determine restoring beam size by fitting PSF file, overriding BMAJ/BMIN/BPA keywords in input image.");
- parser.add_option("--clear",action="store_true",
- help="clear contents of FITS file before adding in sources");
- parser.add_option("--pb",action="store_true",
- help="apply model primary beam function during restoration, if it's defined, and source is not tagged 'nobeam'");
- parser.add_option("--beamgain",action="store_true",
- help="apply beamgain atribute during restoration, if it's defined, and source is not tagged 'nobeam'");
- parser.add_option("--ignore-nobeam",action="store_true",
- help="apply PB or beamgain even if source is tagged 'nobeam'");
- parser.add_option("-F","--freq",type="float",metavar="MHz",default=0,
- help="use this frequency (for spectral indices and primary beams)");
- parser.add_option("-f",dest="force",action="store_true",
- help="overwrite output image even if it already exists");
- parser.add_option("-v","--verbose",dest="verbose",type="int",action="store",
- help="set verbosity level (0 is silent, higher numbers mean more messages)");
- parser.add_option("-T","--timestamps",action="store_true",
- help="enable timestamps in debug messages (useful for timing)");
- parser.set_defaults(n=0,fluxscale='1');
-
- (options,rem_args) = parser.parse_args();
-
- # get filenames
- if len(rem_args) == 2:
- input_image,skymodel = rem_args;
- name,ext = os.path.splitext(input_image)
- output_image = name+".restored"+ext;
- elif len(rem_args) == 3:
- input_image,skymodel,output_image = rem_args;
- else:
- parser.error("Insufficient number of arguments. Use -h for help.");
-
- # check for overwritten output
- if os.path.exists(output_image) and not options.force:
- parser.error("File %s already exists, use the -f option to overwrite."%output_image);
-
- # find Tigger
- try:
- import Tigger
- except ImportError:
- sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))));
- try:
- import Tigger
- except:
- print "Unable to import the Tigger package. Please check your installation and PYTHONPATH.";
- sys.exit(1);
-
- Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
-
- from Tigger.Tools import Imaging
- from Tigger.Tools.Imaging import FWHM,DEG,ARCSEC
-
- Imaging._verbosity.set_verbose(options.verbose);
- Imaging._verbosity.enable_timestamps(options.timestamps);
+ import Tigger.Models.Formats
+ from Tigger.Models.Formats import ASCII
+
+ AUTO = "auto"
+ full_formats = Tigger.Models.Formats.listFormatsFull()
+ input_formats = [name for name, (load, save, doc, extensions) in full_formats if load] + [AUTO]
+
+ # setup some standard command-line option parsing
+ #
+ from optparse import OptionParser
+
+ parser = OptionParser(usage="""%prog: [options] input_image sky_model [output_image]""",
+ description="""Restores sources from sky model into an input image, writes result to output image. If
+an output image is not specified, makes a name for it automatically.""")
+ parser.add_option("-t", "--type", choices=input_formats,
+ help="Input model type (%s). Default is %%default." % (", ".join(input_formats)))
+ parser.add_option("--format", type="string",
+ help="""Input format, for ASCII or BBS tables. For ASCII tables, default is "%s". For BBS tables, the default format is specified in the file header.""" % ASCII.DefaultDMSFormatString)
+ parser.add_option("-n", "--num-sources", dest="nsrc", type="int", action="store",
+ help="Only restore the NSRC brightest sources")
+ parser.add_option("-s", "--scale", dest="fluxscale", metavar="FLUXSCALE[,N]", action="store",
+ help="rescale model fluxes by given factor. If N is given, rescale N brightest only.")
+ parser.add_option("-b", "--restoring-beam", type="string", metavar="BMAJ[,BMIN,PA]",
+ help="specify restoring beam size, overriding BMAJ/BMIN/BPA keywords in input image. " +
+ "Use a single value (arcsec) for circular beam, or else " +
+ "supply major/minor size and position angle (deg).")
+ parser.add_option("-p", "--psf-file", dest="psf", action="store",
+ help="determine restoring beam size by fitting PSF file, overriding BMAJ/BMIN/BPA keywords in input image.")
+ parser.add_option("--clear", action="store_true",
+ help="clear contents of FITS file before adding in sources")
+ parser.add_option("--pb", action="store_true",
+ help="apply model primary beam function during restoration, if it's defined, and source is not tagged 'nobeam'")
+ parser.add_option("--beamgain", action="store_true",
+                      help="apply beamgain attribute during restoration, if it's defined, and source is not tagged 'nobeam'")
+ parser.add_option("--ignore-nobeam", action="store_true",
+ help="apply PB or beamgain even if source is tagged 'nobeam'")
+ parser.add_option("-F", "--freq", type="float", metavar="MHz", default=0,
+ help="use this frequency (for spectral indices and primary beams)")
+ parser.add_option("-f", dest="force", action="store_true",
+ help="overwrite output image even if it already exists")
+ parser.add_option("-v", "--verbose", dest="verbose", type="int", action="store",
+ help="set verbosity level (0 is silent, higher numbers mean more messages)")
+ parser.add_option("-T", "--timestamps", action="store_true",
+ help="enable timestamps in debug messages (useful for timing)")
+ parser.set_defaults(n=0, fluxscale='1')
+
+ (options, rem_args) = parser.parse_args()
+
+ # get filenames
+ if len(rem_args) == 2:
+ input_image, skymodel = rem_args
+ name, ext = os.path.splitext(input_image)
+ output_image = name + ".restored" + ext
+ elif len(rem_args) == 3:
+ input_image, skymodel, output_image = rem_args
+ else:
+ parser.error("Insufficient number of arguments. Use -h for help.")
- # read model and sort by apparent brightness
- # figure out input type
- try:
- input_type,import_func,dum,input_doc = Tigger.Models.Formats.resolveFormat(skymodel,options.type if options.type != AUTO else None);
- except:
- print "Unable to determine model type for %s, please specify one explicitly with the -t/--type option."%skymodel;
- sys.exit(1);
+ # check for overwritten output
+ if os.path.exists(output_image) and not options.force:
+ parser.error("File %s already exists, use the -f option to overwrite." % output_image)
- print "Reading %s (%s)"%(skymodel,input_doc);
- model = import_func(skymodel,format=options.format);
+ # find Tigger
+ try:
+ import Tigger
+ except ImportError:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+ try:
+ import Tigger
+ except:
+ print "Unable to import the Tigger package. Please check your installation and PYTHONPATH."
+ sys.exit(1)
- Imaging.dprintf(1,"Read %d sources from %s\n",len(model.sources),skymodel);
+    Tigger.nuke_matplotlib()  # don't let the door hit you in the ass, sucka
- sources = sorted(model.sources,lambda a,b:cmp(b.brightness(),a.brightness()));
+ from Tigger.Tools import Imaging
+ from Tigger.Tools.Imaging import FWHM, DEG, ARCSEC
- # apply counts and flux scales
- if options.nsrc:
- sources = sources[:options.nsrc];
- Imaging.dprintf(1,"Using %d brightest sources\n",len(sources));
+ Imaging._verbosity.set_verbose(options.verbose)
+ Imaging._verbosity.enable_timestamps(options.timestamps)
- if options.fluxscale != '1':
- if "," in options.fluxscale:
- scale,n = options.fluxscale.split(",",1);
- scale = float(scale);
- n = int(n);
- Imaging.dprintf(1,"Flux of %d brightest sources will be scaled by %f\n",n,scale);
- else:
- scale = float(options.fluxscale);
- n = len(sources);
- Imaging.dprintf(1,"Flux of all model sources will be scaled by %f\n",n,scale);
- for src in sources[:n]:
- src.flux.rescale(0.01);
-
- # open input image
- input_hdu = pyfits.open(input_image)[0];
-
- # get restoring beam size
- if options.restoring_beam:
- ff = options.restoring_beam.split(",");
+ # read model and sort by apparent brightness
+ # figure out input type
try:
- if len(ff) == 1:
- gx = gy = float(ff[0]);
- grot = 0;
- print "User-specified restoring beam of %.2f\""%gx;
- else:
- gx,gy,grot = map(float,ff);
- print "User-specified restoring beam of %.2f\" by %.2f\" at PA %.2f deg"%(gx,gy,grot);
+ input_type, import_func, dum, input_doc = Tigger.Models.Formats.resolveFormat(skymodel,
+ options.type if options.type != AUTO else None)
except:
- print "Invalid -b/--restoring-beam setting.";
- sys.exit(1);
- gx /= FWHM*ARCSEC;
- gy /= FWHM*ARCSEC;
- grot /= DEG;
- elif options.psf:
- # fit the PSF
- gx,gy,grot = Imaging.fitPsf(options.psf);
- print "Fitted restoring beam to PSF file %s: %.2f\" by %.2f\" at PA %.2f deg"%(options.psf,gx*FWHM*ARCSEC,gy*FWHM*ARCSEC,grot*DEG);
- else:
- # else look in input header
- gx,gy,grot = [ input_hdu.header.get(x,None) for x in 'BMAJ','BMIN','BPA' ];
- if any([x is None for x in gx,gy,grot]):
- print "Unable to determine restoring beam size, no BMAJ/BMIN/BPA keywords in input image.",
- print "Try using the -b/-p options to specify an explicit restoring beam.";
- sys.exit(1);
- print "Restoring beam (as per input header) is %.2f\" by %.2f\" at PA %.2f deg"%(gx*3600,gy*3600,grot);
- gx /= DEG*FWHM
- gy /= DEG*FWHM
- grot /= DEG
-
-
- pbexp = None;
- freq = options.freq*1e+6 or model.refFreq() or 1400*1e+6;
-
- if options.pb and model.primaryBeam():
- try:
- pbexp = eval('lambda r,fq:'+model.primaryBeam());
- dum = pbexp(0,1e+9); # evaluate at r=0 and 1 GHz as a test
- if not isinstance(dum,float):
- raise TypeError,"Primary beam expression does not evaluate to a float";
- except Exception,exc:
- print "Bad primary beam expression '%s': %s"%(pb,str(exc));
- sys.exit(1);
- if not freq:
- print "Model must contain a reference requency, or else specify one with --freq.";
- sys.exit(1);
-
- # read, restore, write
- print "Restoring model into input image %s"%input_image;
- if options.clear:
- input_hdu.data[...] = 0;
- Imaging.restoreSources(input_hdu,sources,gx,gy,grot,primary_beam=pbexp,freq=freq,apply_beamgain=options.beamgain,ignore_nobeam=options.ignore_nobeam);
-
- print "Writing output image %s"%output_image;
- if os.path.exists(output_image):
- os.remove(output_image);
- input_hdu.writeto(output_image);
-
+ print "Unable to determine model type for %s, please specify one explicitly with the -t/--type option." % skymodel
+ sys.exit(1)
+
+ print "Reading %s (%s)" % (skymodel, input_doc)
+ model = import_func(skymodel, format=options.format)
+
+ Imaging.dprintf(1, "Read %d sources from %s\n", len(model.sources), skymodel)
+
+ sources = sorted(model.sources, lambda a, b: cmp(b.brightness(), a.brightness()))
+
+ # apply counts and flux scales
+ if options.nsrc:
+ sources = sources[:options.nsrc]
+ Imaging.dprintf(1, "Using %d brightest sources\n", len(sources))
+
+ if options.fluxscale != '1':
+ if "," in options.fluxscale:
+ scale, n = options.fluxscale.split(",", 1)
+ scale = float(scale)
+ n = int(n)
+ Imaging.dprintf(1, "Flux of %d brightest sources will be scaled by %f\n", n, scale)
+ else:
+ scale = float(options.fluxscale)
+ n = len(sources)
+ Imaging.dprintf(1, "Flux of all model sources will be scaled by %f\n", n, scale)
+ for src in sources[:n]:
+ src.flux.rescale(0.01)
+
+ # open input image
+ input_hdu = pyfits.open(input_image)[0]
+
+ # get restoring beam size
+ if options.restoring_beam:
+ ff = options.restoring_beam.split(",")
+ try:
+ if len(ff) == 1:
+ gx = gy = float(ff[0])
+ grot = 0
+ print "User-specified restoring beam of %.2f\"" % gx
+ else:
+ gx, gy, grot = map(float, ff)
+ print "User-specified restoring beam of %.2f\" by %.2f\" at PA %.2f deg" % (gx, gy, grot)
+ except:
+ print "Invalid -b/--restoring-beam setting."
+ sys.exit(1)
+ gx /= FWHM * ARCSEC
+ gy /= FWHM * ARCSEC
+ grot /= DEG
+ elif options.psf:
+ # fit the PSF
+ gx, gy, grot = Imaging.fitPsf(options.psf)
+ print "Fitted restoring beam to PSF file %s: %.2f\" by %.2f\" at PA %.2f deg" % (
+ options.psf, gx * FWHM * ARCSEC, gy * FWHM * ARCSEC, grot * DEG)
+ else:
+ # else look in input header
+ gx, gy, grot = [input_hdu.header.get(x, None) for x in 'BMAJ', 'BMIN', 'BPA']
+ if any([x is None for x in gx, gy, grot]):
+ print "Unable to determine restoring beam size, no BMAJ/BMIN/BPA keywords in input image.",
+ print "Try using the -b/-p options to specify an explicit restoring beam."
+ sys.exit(1)
+ print "Restoring beam (as per input header) is %.2f\" by %.2f\" at PA %.2f deg" % (gx * 3600, gy * 3600, grot)
+ gx /= DEG * FWHM
+ gy /= DEG * FWHM
+ grot /= DEG
+
+ pbexp = None
+ freq = options.freq * 1e+6 or model.refFreq() or 1400 * 1e+6
+
+ if options.pb and model.primaryBeam():
+ try:
+ pbexp = eval('lambda r,fq:' + model.primaryBeam())
+ dum = pbexp(0, 1e+9); # evaluate at r=0 and 1 GHz as a test
+ if not isinstance(dum, float):
+ raise TypeError, "Primary beam expression does not evaluate to a float"
+ except Exception, exc:
+ print "Bad primary beam expression '%s': %s" % (pb, str(exc))
+ sys.exit(1)
+ if not freq:
+ print "Model must contain a reference requency, or else specify one with --freq."
+ sys.exit(1)
+
+ # read, restore, write
+ print "Restoring model into input image %s" % input_image
+ if options.clear:
+ input_hdu.data[...] = 0
+ Imaging.restoreSources(input_hdu, sources, gx, gy, grot, primary_beam=pbexp, freq=freq,
+ apply_beamgain=options.beamgain, ignore_nobeam=options.ignore_nobeam)
+
+ print "Writing output image %s" % output_image
+ if os.path.exists(output_image):
+ os.remove(output_image)
+ input_hdu.writeto(output_image)
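For orientation, the rewritten tigger-restore script above is driven from the command line roughly as follows; the file names here are illustrative only and not taken from the patch:

    # default output name becomes residual.restored.fits
    tigger-restore residual.fits skymodel.lsm.html
    # explicit output name, 15-arcsec circular restoring beam, overwrite an existing file
    tigger-restore -f -b 15 residual.fits skymodel.lsm.html restored.fits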
diff --git a/Tigger/bin/tigger-tag b/Tigger/bin/tigger-tag
index faf8e7f..1e6a07f 100755
--- a/Tigger/bin/tigger-tag
+++ b/Tigger/bin/tigger-tag
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
#
-#% $Id$
+# % $Id$
#
#
# Copyright (C) 2002-2011
@@ -26,344 +26,350 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-import sys
-from astropy.io import fits as pyfits
-import re
-import os.path
-import math
-import numpy
-import traceback
import fnmatch
+import math
+import re
+import sys
-DEG = math.pi/180;
-ARCSEC = DEG/3600
+import os.path
+import __builtin__
-NATIVE = "Tigger";
+DEG = math.pi / 180
+ARCSEC = DEG / 3600
+NATIVE = "Tigger"
-def transfer_tags(fromlsm,lsm,output,tags,tolerance,tigger):
- """Transfers tags from a reference LSM to the given LSM. That is, for every tag
- in the given list, finds all sources with those tags in 'fromlsm', then applies
- these tags to all nearby sources in 'lsm' (within a radius of 'tolerance').
- Saves the result to an LSM file given by 'output'.
- """
- # now, set dE tags on sources
- tagset = frozenset(tags.split());
- print("Transferring tags %s from %s to %s (%.2f\" tolerance)"%(",".join(tagset),fromlsm,lsm,tolerance));
+def transfer_tags(fromlsm, lsm, output, tags, tolerance, tigger):
+ """Transfers tags from a reference LSM to the given LSM. That is, for every tag
+ in the given list, finds all sources with those tags in 'fromlsm', then applies
+ these tags to all nearby sources in 'lsm' (within a radius of 'tolerance').
+ Saves the result to an LSM file given by 'output'.
+ """
+ # now, set dE tags on sources
+ tagset = frozenset(tags.split())
+ print("Transferring tags %s from %s to %s (%.2f\" tolerance)" % (",".join(tagset), fromlsm, lsm, tolerance))
- refmodel = tigger.load(fromlsm);
- model = tigger.load(lsm);
- # for each dE-tagged source in the reference model, find all nearby sources
- # in our LSM, and tag them
- for src0 in refmodel.getSourceSubset(",".join(["="+x for x in tagset])):
- for src in model.getSourcesNear(src0.pos.ra,src0.pos.dec,tolerance=tolerance*ARCSEC):
- for tag in tagset:
- tagval = src0.getTag(tag,None);
- if tagval is not None:
- if src.getTag(tag,None) != tagval:
- src.setTag(tag,tagval);
- print("setting tag %s=%s on source %s (from reference source %s)"%(tag,tagval,src.name,src0.name))
- model.save(output);
+ refmodel = tigger.load(fromlsm)
+ model = tigger.load(lsm)
+ # for each dE-tagged source in the reference model, find all nearby sources
+ # in our LSM, and tag them
+ for src0 in refmodel.getSourceSubset(",".join(["=" + x for x in tagset])):
+ for src in model.getSourcesNear(src0.pos.ra, src0.pos.dec, tolerance=tolerance * ARCSEC):
+ for tag in tagset:
+ tagval = src0.getTag(tag, None)
+ if tagval is not None:
+ if src.getTag(tag, None) != tagval:
+ src.setTag(tag, tagval)
+ print("setting tag %s=%s on source %s (from reference source %s)" % (
+ tag, tagval, src.name, src0.name))
+ model.save(output)
if __name__ == '__main__':
- import Kittens.utils
- from Kittens.utils import curry
- _verbosity = Kittens.utils.verbosity(name="convert-model");
- dprint = _verbosity.dprint;
- dprintf = _verbosity.dprintf;
-
- # find Tigger
- try:
- import Tigger
- except ImportError:
- dirname = os.path.dirname(os.path.realpath(__file__));
- # go up the directory tree looking for directory "Tigger"
- while len(dirname) > 1:
- if os.path.basename(dirname) == "Tigger":
- break;
- dirname = os.path.dirname(dirname);
- else:
- print "Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH."%os.path.realpath(__file__);
- sys.exit(1);
- sys.path.append(os.path.dirname(dirname));
+ import Kittens.utils
+
+ _verbosity = Kittens.utils.verbosity(name="convert-model")
+ dprint = _verbosity.dprint
+ dprintf = _verbosity.dprintf
+
+ # find Tigger
try:
- import Tigger
- except:
- print "Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH."%dirname;
- sys.exit(1);
-
- Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
-
- # setup some standard command-line option parsing
- #
- from optparse import OptionParser
- parser = OptionParser(usage="""%prog: sky_model [NAME or SELTAG<>SELVAL] [TAG=[TYPE:]VALUE or +TAG or !TAG or /TAG ...]""",
- description=
-"""Sets or changes tags of selected sources in the sky model.
-Use NAME (with shell-style wildcards allowed) to select sources by name, or
-=SELTAG to select sources having the specified (non-zero) tag, or SELTAG<>SELVAL to
-select sources by comparing a tag to a value, where '<>' represents a comparison
-operator, and can be one of == (or =),!=,<=,<,>,>= (or the FORTRAN-style
-operators .eq.,.ne.,.le.,.lt.,.gt.,.ge.). SELVAL may also be followed by one of the characters
-'d', 'm' or 's', in which case it will be converted from degrees,
-minutes or seconds into radians. This is useful for selections such as "r<5d".
-Then, with a subset of sources selected, use TAG=TYPE:VALUE (where TYPE is one of: bool, int, float, str, complex)
-to set a tag on the selected sources to a value of a specific type, or TAG=VALUE to determine type
-automatically, or +TAG to set a bool True tag, !TAG to set a False tag, and /TAG to remove a tag."""
-);
-
- parser.add_option("-l","--list",action="store_true",
- help="Simply lists selected sources, does not apply any tags.");
- parser.add_option("-o","--output",metavar="FILENAME",type="string",
- help="Saves changes to different output model. Default is to save in place.");
- parser.add_option("-f","--force",action="store_true",
- help="Saves changes to model without prompting. Default is to prompt.");
- parser.add_option("-t", "--transfer-tags",dest="transfer_tags",type="string",metavar="FROM_LSM:TOL",
- help="""Transfers tags from a reference LSM (FROM_LSM) to the given LSM (sky_model).
+ import Tigger
+ except ImportError:
+ dirname = os.path.dirname(os.path.realpath(__file__))
+ # go up the directory tree looking for directory "Tigger"
+ while len(dirname) > 1:
+ if os.path.basename(dirname) == "Tigger":
+ break
+ dirname = os.path.dirname(dirname)
+ else:
+ print "Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH." % os.path.realpath(
+ __file__)
+ sys.exit(1)
+ sys.path.append(os.path.dirname(dirname))
+ try:
+ import Tigger
+ except:
+ print "Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH." % dirname
+ sys.exit(1)
+
+ Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
+
+ # setup some standard command-line option parsing
+ #
+ from optparse import OptionParser
+
+ parser = OptionParser(
+ usage="""%prog: sky_model [NAME or SELTAG<>SELVAL] [TAG=[TYPE:]VALUE or +TAG or !TAG or /TAG ...]""",
+ description=
+ """Sets or changes tags of selected sources in the sky model.
+ Use NAME (with shell-style wildcards allowed) to select sources by name, or
+ =SELTAG to select sources having the specified (non-zero) tag, or SELTAG<>SELVAL to
+ select sources by comparing a tag to a value, where '<>' represents a comparison
+ operator, and can be one of == (or =),!=,<=,<,>,>= (or the FORTRAN-style
+ operators .eq.,.ne.,.le.,.lt.,.gt.,.ge.). SELVAL may also be followed by one of the characters
+ 'd', 'm' or 's', in which case it will be converted from degrees,
+ minutes or seconds into radians. This is useful for selections such as "r<5d".
+ Then, with a subset of sources selected, use TAG=TYPE:VALUE (where TYPE is one of: bool, int, float, str, complex)
+ to set a tag on the selected sources to a value of a specific type, or TAG=VALUE to determine type
+ automatically, or +TAG to set a bool True tag, !TAG to set a False tag, and /TAG to remove a tag."""
+ )
+
+ parser.add_option("-l", "--list", action="store_true",
+ help="Simply lists selected sources, does not apply any tags.")
+ parser.add_option("-o", "--output", metavar="FILENAME", type="string",
+ help="Saves changes to different output model. Default is to save in place.")
+ parser.add_option("-f", "--force", action="store_true",
+ help="Saves changes to model without prompting. Default is to prompt.")
+ parser.add_option("-t", "--transfer-tags", dest="transfer_tags", type="string", metavar="FROM_LSM:TOL",
+ help="""Transfers tags from a reference LSM (FROM_LSM) to the given LSM (sky_model).
That is, for every tag in the given list, finds all sources with those tags in the reference LSM,
then applies these tags to all nearby sources in LSM (within a radius of 'tolerance' [ARCSEC]).
Saves the result to an LSM file given by -o/--output.
-""");
- parser.add_option("-d", "--debug",dest="verbose",type="string",action="append",metavar="Context=Level",
- help="(for debugging Python code) sets verbosity level of the named Python context. May be used multiple times.");
-
- parser.set_defaults();
-
- (options,rem_args) = parser.parse_args();
-
- # get filenames
- if len(rem_args) < 2:
- parser.error("Incorrect number of arguments. Use -h for help.");
-
- skymodel = rem_args[0];
- # load the model
- model = Tigger.load(skymodel);
- if not model.sources:
- print "Input model %s contains no sources"%skymodel;
- sys.exit(0);
- print "Input model contains %d sources"%len(model.sources);
-
- if options.transfer_tags:
- fromlsm, tolerance = options.transfer_tags.split(":")
- tags = " ".join(rem_args[1:])
- transfer_tags(fromlsm, skymodel, options.output, tags, float(tolerance), Tigger)
- sys.exit(0)
-
- # comparison predicates for the SELTAG<>SELVAL option
- select_predicates = {
- '==':lambda x,y:x==y,
- '!=':lambda x,y:x!=y,
- '>=':lambda x,y:x>=y,
- '<=':lambda x,y:x<=y,
- '>' :lambda x,y:x>y,
- '<' :lambda x,y:x=y,
- '.le.':lambda x,y:x<=y,
- '.gt.' :lambda x,y:x>y,
- '.lt.' :lambda x,y:xSELVAL, or the TAG=[TYPE:]VALUE, or the [+!/]TAG forms
- # If none match, assume the NAME form
- mselcomp = re.match("^(?i)([^=<>!.]+)(%s)([^dms]+)([dms])?"%"|".join([ key.replace('.','\.') for key in select_predicates.keys()]),arg);
- mseltag = re.match("=(.+)$",arg);
- mset = re.match("^(.+)=((bool|int|str|float|complex):)?(.+)$",arg);
- msetbool = re.match("^([+!/])(.+)$",arg);
-
- # SELTAG<>SELVAL selection
- if mselcomp:
- seltag,oper,selval,unit = mselcomp.groups();
- try:
- selval = float(selval)*select_units.get(unit,1.);
- except:
- parser.error("Malformed selection string '%s': right-hand side is not a number."%arg);
- predicate = select_predicates[oper.lower()];
- # get tag value
- srctag = [ (src,getTagValue(src,seltag)) for src in model.sources ];
- apply_selection([ src for src,tag in srctag if tag is not None and predicate(tag,selval) ],arg);
- elif mseltag:
- seltag = mseltag.groups()[0];
- apply_selection([ src for src in model.sources if getTagValue(src,seltag) ],arg);
- elif not mseltag and not mselcomp and not mset and not msetbool:
- apply_selection([ src for src in model.sources if fnmatch.fnmatch(src.name,arg) ],arg);
- elif mset:
- sources = retrieve_selection();
- if options.list:
- print "--list in effect, ignoring tagging commands";
- continue;
- tagname,typespec,typename,value = mset.groups();
- # if type is specified, use it to explicitly convert the value
- # first bool: allow True/False/T/F
- if typename == "bool":
- val = value.lower();
- if val == "true" or val == "t":
- newval = True;
- elif val == "false" or val == "f":
- newval = False;
+""")
+ parser.add_option("-d", "--debug", dest="verbose", type="string", action="append", metavar="Context=Level",
+ help="(for debugging Python code) sets verbosity level of the named Python context. May be used multiple times.")
+
+ parser.set_defaults()
+
+ (options, rem_args) = parser.parse_args()
+
+ # get filenames
+ if len(rem_args) < 2:
+ parser.error("Incorrect number of arguments. Use -h for help.")
+
+ skymodel = rem_args[0]
+ # load the model
+ model = Tigger.load(skymodel)
+ if not model.sources:
+ print "Input model %s contains no sources" % skymodel
+ sys.exit(0)
+ print "Input model contains %d sources" % len(model.sources)
+
+ if options.transfer_tags:
+ fromlsm, tolerance = options.transfer_tags.split(":")
+ tags = " ".join(rem_args[1:])
+ transfer_tags(fromlsm, skymodel, options.output, tags, float(tolerance), Tigger)
+ sys.exit(0)
+
+ # comparison predicates for the SELTAG<>SELVAL option
+ select_predicates = {
+ '==': lambda x, y: x == y,
+ '!=': lambda x, y: x != y,
+ '>=': lambda x, y: x >= y,
+ '<=': lambda x, y: x <= y,
+ '>': lambda x, y: x > y,
+ '<': lambda x, y: x < y,
+ '.eq.': lambda x, y: x == y,
+ '.ne.': lambda x, y: x != y,
+ '.ge.': lambda x, y: x >= y,
+ '.le.': lambda x, y: x <= y,
+ '.gt.': lambda x, y: x > y,
+ '.lt.': lambda x, y: x < y
+ }
+ # units for same
+ select_units = dict(d=DEG, m=DEG / 60, s=DEG / 3600)
+
+ # This is where we accumulate the result of selection arguments, until we hit the first tagging argument.
+ # Initially None, meaning no explicit selection
+ selected_ids = None
+
+ # This is where we put the selection when we hit the first tagging argument.
+ selection = None
+
+ # this is set to true when the selection is listed
+ listed = False
+ # set to true when the model is modified
+ modified = False
+
+
+ def apply_selection(sel, selstr):
+ """Helper function: applies selection argument"""
+ global selection
+ global selected_ids
+ global listed
+ listed = False
+ # if selection is not None, then we've already selected and tagged something, so we need
+ # to reset the selection to empty and start again. If selected_ids is None, this is the first selection
+ if selection is not None or selected_ids is None:
+ print "Selecting sources:"
+ selected_ids = set()
+ selection = None
+ # add to current selection
+ selected_ids.update(map(id, sel))
+ # print result
+ if not len(sel):
+ print ' %-16s: no sources selected' % selstr
+ elif len(sel) == 1:
+ print ' %-16s: one source selected (%s)' % (selstr, sel[0].name)
+ elif len(sel) <= 5:
+ print ' %-16s: %d sources selected (%s)' % (selstr, len(sel), " ".join([src.name for src in sel]))
else:
- try:
- newval = bool(int(value));
- except:
- print "Can't parse \"%s\" as a value of type bool"%value;
- sys.exit(2);
- # else some other type is specified -- use it to convert the value
- elif typename:
+ print ' %-16s: %d sources selected' % (selstr, len(sel))
+
+
+ def retrieve_selection():
+ """Helper function: retrieves current selection in preparation for tagging"""
+ global selection
+ global selected_ids
+ # if selection is None, then we need to set it up based on selected_ids
+ if selection is None:
+ # no explicit selection: use entire model
+ if selected_ids is None:
+ selection = model.sources
+ print "No explicit selection, using all sources."
+ # else use selected set
+ else:
+ selection = [src for src in model.sources if id(src) in selected_ids]
+ print "Using %d selected sources:" % len(selection)
+ if options.list:
+ print "Sources: %s" % (" ".join([x.name for x in selection]))
+ global listed
+ listed = True
+ return selection
+
+
+ def getTagValue(src, tag):
+ """Helper function: looks for the given tag in the source, or in its sub-objects"""
+ for obj in src, src.pos, src.flux, getattr(src, 'shape', None), getattr(src, 'spectrum', None):
+ if obj is not None and hasattr(obj, tag):
+ return getattr(obj, tag)
+ return None
+
+
+ def lookupObject(src, tagname):
+ """helper function to look into sub-objects of a Source object.
+ Given src and "a", returns src,"a"
+ Given src and "a.b", returns src.a and "b"
+ """
+ tags = tagname.split(".")
+ for subobj in tags[:-1]:
+ src = getattr(src, subobj, None)
+ if src is None:
+ print "Can't resolve attribute %s for source %s" % (tagname, src.name)
+ sys.exit(1)
+ return src, tags[-1]
+
+
+ # loop over all arguments
+ for arg in rem_args[1:]:
+ # Match either the SELTAG<>SELVAL, or the TAG=[TYPE:]VALUE, or the [+!/]TAG forms
+ # If none match, assume the NAME form
+ mselcomp = re.match("^(?i)([^=<>!.]+)(%s)([^dms]+)([dms])?" % "|".join(
+ [key.replace('.', '\.') for key in select_predicates.keys()]), arg)
+ mseltag = re.match("=(.+)$", arg)
+ mset = re.match("^(.+)=((bool|int|str|float|complex):)?(.+)$", arg)
+ msetbool = re.match("^([+!/])(.+)$", arg)
+
+ # SELTAG<>SELVAL selection
+ if mselcomp:
+ seltag, oper, selval, unit = mselcomp.groups()
+ try:
+ selval = float(selval) * select_units.get(unit, 1.)
+ except:
+ parser.error("Malformed selection string '%s': right-hand side is not a number." % arg)
+ predicate = select_predicates[oper.lower()]
+ # get tag value
+ srctag = [(src, getTagValue(src, seltag)) for src in model.sources]
+ apply_selection([src for src, tag in srctag if tag is not None and predicate(tag, selval)], arg)
+ elif mseltag:
+ seltag = mseltag.groups()[0]
+ apply_selection([src for src in model.sources if getTagValue(src, seltag)], arg)
+ elif not mseltag and not mselcomp and not mset and not msetbool:
+ apply_selection([src for src in model.sources if fnmatch.fnmatch(src.name, arg)], arg)
+ elif mset:
+ sources = retrieve_selection()
+ if options.list:
+ print "--list in effect, ignoring tagging commands"
+ continue
+ tagname, typespec, typename, value = mset.groups()
+ # if type is specified, use it to explicitly convert the value
+ # first bool: allow True/False/T/F
+ if typename == "bool":
+ val = value.lower()
+ if val == "true" or val == "t":
+ newval = True
+ elif val == "false" or val == "f":
+ newval = False
+ else:
+ try:
+ newval = bool(int(value))
+ except:
+ print "Can't parse \"%s\" as a value of type bool" % value
+ sys.exit(2)
+ # else some other type is specified -- use it to convert the value
+ elif typename:
+ try:
+ newval = getattr(__builtin__, typename)(value)
+ except:
+ print "Can't parse \"%s\" as a value of type %s" % (value, typename)
+ sys.exit(2)
+ # else auto-convert
+ else:
+ newval = None
+ for tp in int, float, complex, str:
+ try:
+ newval = tp(value)
+ break
+ except:
+ pass
+ # ok, value determined
+ if type(newval) is str:
+ value = '"%s"' % value
+ if sources:
+ print " setting tag %s=%s (type '%s')" % (tagname, value, type(newval).__name__)
+ for src in sources:
+ obj, tag = lookupObject(src, tagname)
+ obj.setAttribute(tag, newval)
+ modified = True
+ else:
+ print "No sources selected, ignoring tagging commands"
+ elif msetbool:
+ sources = retrieve_selection()
+ if options.list:
+ print "--list in effect, ignoring tagging commands"
+ continue
+ if sources:
+ op, tagname = msetbool.groups()
+ if op == "+":
+ print " setting tag %s=True" % tagname
+ method = 'setAttribute'
+ args = (tagname, True)
+ elif op == "!":
+ print " setting tag %s=False" % tagname
+ method = 'setAttribute'
+ args = (tagname, False)
+ elif op == "/":
+ print " removing tag %s" % tagname
+ method = 'removeAttribute'
+ args = (tagname,)
+ for src in sources:
+ obj, tag = lookupObject(src, tagname)
+ getattr(obj, method)(*args)
+ modified = True
+ else:
+ print "No sources selected, ignoring tagging commands"
+
+ if options.list:
+ if not listed:
+ retrieve_selection()
+
+ if not modified:
+ print "Model was not modified"
+ sys.exit(0)
+
+ # prompt
+ if not options.force:
try:
- newval = getattr(__builtin__,typename)(value);
+ raw_input("Press ENTER to save model or Ctrl+C to cancel: ")
except:
- print "Can't parse \"%s\" as a value of type %s"%(value,typename);
- sys.exit(2);
- # else auto-convert
- else:
- newval = None;
- for tp in int,float,complex,str:
- try:
- newval = tp(value);
- break;
- except:
- pass;
- # ok, value determined
- if type(newval) is str:
- value = '"%s"'%value;
- if sources:
- print " setting tag %s=%s (type '%s')"%(tagname,value,type(newval).__name__);
- for src in sources:
- obj,tag = lookupObject(src,tagname);
- obj.setAttribute(tag,newval);
- modified = True;
- else:
- print "No sources selected, ignoring tagging commands";
- elif msetbool:
- sources = retrieve_selection();
- if options.list:
- print "--list in effect, ignoring tagging commands";
- continue;
- if sources:
- op,tagname = msetbool.groups();
- if op == "+":
- print " setting tag %s=True"%tagname;
- method = 'setAttribute';
- args = (tagname,True);
- elif op == "!":
- print " setting tag %s=False"%tagname;
- method = 'setAttribute';
- args = (tagname,False);
- elif op == "/":
- print " removing tag %s"%tagname;
- method = 'removeAttribute';
- args = (tagname,);
- for src in sources:
- obj,tag = lookupObject(src,tagname);
- getattr(obj,method)(*args);
- modified = True;
- else:
- print "No sources selected, ignoring tagging commands";
-
- if options.list:
- if not listed:
- retrieve_selection();
-
- if not modified:
- print "Model was not modified";
- sys.exit(0);
-
- # prompt
- if not options.force:
- try:
- raw_input("Press ENTER to save model or Ctrl+C to cancel: ");
- except:
- print "Cancelling";
- sys.exit(1);
-
- # save output
- if options.output:
- model.save(options.output);
- print "Saved updated model to %s"%options,output;
- else:
- model.save(skymodel);
- print "Saved updated model";
+ print "Cancelling"
+ sys.exit(1)
+ # save output
+ if options.output:
+ model.save(options.output)
+ print "Saved updated model to %s" % options, output
+ else:
+ model.save(skymodel)
+ print "Saved updated model"
From 0b3a2b634ad184f503152fe5265a0acd273aae05 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 6 Apr 2018 11:37:56 +0200
Subject: [PATCH 06/13] run 2to3
---
Tigger/Coordinates.py | 14 ++++----
Tigger/Models/Formats/AIPSCC.py | 8 ++---
Tigger/Models/Formats/AIPSCCFITS.py | 4 +--
Tigger/Models/Formats/ASCII.py | 24 +++++++-------
Tigger/Models/Formats/BBS.py | 18 +++++------
Tigger/Models/Formats/ModelHTML.py | 4 +--
Tigger/Models/Formats/NEWSTAR.py | 4 +--
Tigger/Models/Formats/PyBDSMGaul.py | 2 +-
Tigger/Models/Formats/__init__.py | 8 ++---
Tigger/Models/ModelClasses.py | 50 ++++++++++++++---------------
Tigger/Models/PlotStyles.py | 8 ++---
Tigger/Models/SkyModel.py | 15 +++++----
Tigger/SiameseInterface.py | 2 +-
Tigger/Tools/FITSHeaders.py | 4 +--
Tigger/Tools/Imaging.py | 28 ++++++++--------
15 files changed, 97 insertions(+), 96 deletions(-)
diff --git a/Tigger/Coordinates.py b/Tigger/Coordinates.py
index c3ccae5..6516ab0 100644
--- a/Tigger/Coordinates.py
+++ b/Tigger/Coordinates.py
@@ -62,7 +62,7 @@
from astLib.astWCS import WCS
import PyWCSTools.wcs
except ImportError:
- print "Failed to import the astLib.astWCS and/or PyWCSTools module. Please install the astLib package (http://astlib.sourceforge.net/)."
+ print("Failed to import the astLib.astWCS and/or PyWCSTools module. Please install the astLib package (http://astlib.sourceforge.net/).")
raise
startup_dprint(1, "imported WCS")
@@ -91,7 +91,7 @@ def angular_dist_pos_angle2(ra1, dec1, ra2, dec2):
x = cosa * sind0 - sind * cosd0
y = sina
z = cosa * cosd0 + sind * sind0
- print x, y, z
+ print(x, y, z)
PA = numpy.arctan2(y, -x)
R = numpy.arccos(z)
@@ -108,7 +108,7 @@ def angular_dist_pos_angle2(ra1, dec1, ra2, dec2):
x = cosa * sind0 - sind * cosd0
y = sina
z = cosa * cosd0 + sind * sind0
- print x, y, z
+ print(x, y, z)
PA = numpy.arctan2(y, -x)
R = numpy.arccos(z)
return R, PA
@@ -202,13 +202,13 @@ def offset_lm(cls, dra, ddec, ra0, dec0):
return cls(ra0, dec0).offset(dra, ddec)
def lm(self, ra, dec):
- raise TypeError, "lm() not yet implemented in projection %s" % type(self).__name__
+ raise TypeError("lm() not yet implemented in projection %s" % type(self).__name__)
def offset(self, dra, ddec):
- raise TypeError, "offset() not yet implemented in projection %s" % type(self).__name__
+ raise TypeError("offset() not yet implemented in projection %s" % type(self).__name__)
def radec(self, l, m):
- raise TypeError, "radec() not yet implemented in projection %s" % type(self).__name__
+ raise TypeError("radec() not yet implemented in projection %s" % type(self).__name__)
class Projection(object):
@@ -233,7 +233,7 @@ def __init__(self, header):
self.yscale = self.wcs.getYPixelSizeDeg() * DEG
has_projection = True
except:
- print "No WCS in FITS file, falling back to pixel coordinates."
+ print("No WCS in FITS file, falling back to pixel coordinates.")
ra0 = dec0 = self.xpix0 = self.ypix0 = 0
self.xscale = self.yscale = DEG / 3600
has_projection = False
diff --git a/Tigger/Models/Formats/AIPSCC.py b/Tigger/Models/Formats/AIPSCC.py
index 3ceb951..fc35794 100644
--- a/Tigger/Models/Formats/AIPSCC.py
+++ b/Tigger/Models/Formats/AIPSCC.py
@@ -63,7 +63,7 @@ def load(filename, center=None, **kw):
ff = file(filename)
if center is None:
- raise ValueError, "field centre must be specified"
+ raise ValueError("field centre must be specified")
# now process file line-by-line
linenum = 0
@@ -76,7 +76,7 @@ def load(filename, center=None, **kw):
continue
try:
num = int(ff[0])
- dx, dy, i, i_tot = map(float, ff[1:])
+ dx, dy, i, i_tot = list(map(float, ff[1:]))
except:
continue
try:
@@ -84,8 +84,8 @@ def load(filename, center=None, **kw):
l, m = sin(dx * ARCSEC), sin(dy * ARCSEC)
ra, dec = lm_to_radec(l, m, *center)
pos = ModelClasses.Position(ra, dec)
- except Exception, exc:
- print "CC %d: error converting coordinates (%s), skipping" % (num, str(exc))
+ except Exception as exc:
+ print("CC %d: error converting coordinates (%s), skipping" % (num, str(exc)))
continue
flux = ModelClasses.Flux(i)
# now create a source object
diff --git a/Tigger/Models/Formats/AIPSCCFITS.py b/Tigger/Models/Formats/AIPSCCFITS.py
index ccff158..2d78772 100644
--- a/Tigger/Models/Formats/AIPSCCFITS.py
+++ b/Tigger/Models/Formats/AIPSCCFITS.py
@@ -79,7 +79,7 @@ def load(filename, center=None, **kw):
ra = hdr['CRVAL1'] * _units[hdr.get('CUNIT1', 'DEG').strip()]
dec = hdr['CRVAL2'] * _units[hdr.get('CUNIT2', 'DEG').strip()]
- print "Using FITS image centre (%.4f, %.4f deg) as field centre" % (ra / DEG, dec / DEG)
+ print("Using FITS image centre (%.4f, %.4f deg) as field centre" % (ra / DEG, dec / DEG))
center = ra, dec
# now process file line-by-line
@@ -88,7 +88,7 @@ def load(filename, center=None, **kw):
ux = _units[hdr.get('TUNIT2', 'DEG').strip()]
uy = _units[hdr.get('TUNIT3', 'DEG').strip()]
for num, ccrec in enumerate(cclist):
- stokes_i, dx, dy = map(float, ccrec)
+ stokes_i, dx, dy = list(map(float, ccrec))
# convert dx/dy to real positions
l, m = sin(dx * ux), sin(dy * uy)
ra, dec = lm_to_radec(l, m, *center)
diff --git a/Tigger/Models/Formats/ASCII.py b/Tigger/Models/Formats/ASCII.py
index 8694ef2..0623082 100644
--- a/Tigger/Models/Formats/ASCII.py
+++ b/Tigger/Models/Formats/ASCII.py
@@ -124,7 +124,7 @@ def get_field(name):
def get_ang_field(name, units=ANGULAR_UNITS):
column = err_column = colunit = errunit = None
units = units or ANGULAR_UNITS
- for unit, scale in units.iteritems():
+ for unit, scale in units.items():
if column is None:
column = format.get("%s_%s" % (name, unit))
if column is not None:
@@ -157,14 +157,14 @@ def getval(num, scale=1):
# make list of fieldname,fieldnumber tuples
fields = [(field, i) for i, field in enumerate(format.split())]
if not fields:
- raise ValueError, "illegal format string in file: '%s'" % format
+ raise ValueError("illegal format string in file: '%s'" % format)
# last fieldname can end with ... to indicate that it absorbs the rest of the line
if fields[-1][0].endswith('...'):
fields[-1] = (fields[-1][0][:-3], slice(fields[-1][1], None))
# make format dict
format = dict(fields)
elif not isinstance(format, dict):
- raise TypeError, "invalid 'format' argument of type %s" % (type(format))
+ raise TypeError("invalid 'format' argument of type %s" % (type(format)))
# nf = max(format.itervalues())+1
# fields = ['---']*nf
# for field,number in format.iteritems():
@@ -172,26 +172,26 @@ def getval(num, scale=1):
# format_str = " ".join(fields)
# get list of custom attributes from format
custom_attrs = []
- for name, col in format.iteritems():
+ for name, col in format.items():
if name.startswith(":"):
m = re.match("^:(bool|int|float|complex|str):([\w]+)$", name)
if not m:
- raise TypeError, "invalid field specification '%s' in format string" % name
+ raise TypeError("invalid field specification '%s' in format string" % name)
custom_attrs.append((eval(m.group(1)), m.group(2), col))
# get minimum necessary fields from format
name_field = format.get('name', None)
# flux
i_field, i_err_field = get_field("i")
if i_field is None:
- raise ValueError, "ASCII format specification lacks mandatory flux field ('i')"
+ raise ValueError("ASCII format specification lacks mandatory flux field ('i')")
# main RA field
ra_field, ra_scale, ra_err_field, ra_err_scale = get_ang_field('ra', ANGULAR_UNITS_RA)
if ra_field is None:
- raise ValueError, "ASCII format specification lacks mandatory Right Ascension field ('ra_h', 'ra_d' or 'ra_rad')"
+ raise ValueError("ASCII format specification lacks mandatory Right Ascension field ('ra_h', 'ra_d' or 'ra_rad')")
# main Dec field
dec_field, dec_scale, dec_err_field, dec_err_scale = get_ang_field('dec', ANGULAR_UNITS_DEC)
if dec_field is None:
- raise ValueError, "ASCII format specification lacks mandatory Declination field ('dec_d' or 'dec_rad')"
+ raise ValueError("ASCII format specification lacks mandatory Declination field ('dec_d' or 'dec_rad')")
# polarization as QUV
quv_fields = [get_field(x) for x in ['q', 'u', 'v']]
# linear polarization as fraction and angle
@@ -201,7 +201,7 @@ def getval(num, scale=1):
if not polpa_field is not None:
polpa_field, polpa_scale = format.get('pol_pa_rad', None), 1
# fields for extent parameters
- extent_fields = [get_ang_field(x, ANGULAR_UNITS) for x in 'emaj', 'emin', 'pa']
+ extent_fields = [get_ang_field(x, ANGULAR_UNITS) for x in ('emaj', 'emin', 'pa')]
# all three must be present, else ignore
if any([x[0] is None for x in extent_fields]):
extent_fields = None
@@ -396,7 +396,7 @@ def save(model, filename, sources=None, format=None, **kw):
# convert this into format dict
fields = [[field, i] for i, field in enumerate(format_str.split())]
if not fields:
- raise ValueError, "illegal format string '%s'" % format
+ raise ValueError("illegal format string '%s'" % format)
# last fieldname can end with ... ("tags..."), so strip it
if fields[-1][0].endswith('...'):
fields[-1][0] = fields[-1][0][:-3]
@@ -407,9 +407,9 @@ def save(model, filename, sources=None, format=None, **kw):
name_field = format.get('name', None)
# main RA field
ra_rad_field, ra_d_field, ra_h_field, ra_m_field, ra_s_field = \
- [format.get(x, None) for x in 'ra_rad', 'ra_d', 'ra_h', 'ra_m', 'ra_s']
+ [format.get(x, None) for x in ('ra_rad', 'ra_d', 'ra_h', 'ra_m', 'ra_s')]
dec_rad_field, dec_d_field, dec_m_field, dec_s_field = \
- [format.get(x, None) for x in 'dec_rad', 'dec_d', 'dec_m', 'dec_s']
+ [format.get(x, None) for x in ('dec_rad', 'dec_d', 'dec_m', 'dec_s')]
if ra_h_field is not None:
ra_scale = 15
ra_d_field = ra_h_field
diff --git a/Tigger/Models/Formats/BBS.py b/Tigger/Models/Formats/BBS.py
index 3e52a90..670ed74 100644
--- a/Tigger/Models/Formats/BBS.py
+++ b/Tigger/Models/Formats/BBS.py
@@ -54,7 +54,7 @@ def __init__(self, parser, fields=None):
self._fields = fields
if fields:
# parse fields
- for field, number in parser.field_number.iteritems():
+ for field, number in parser.field_number.items():
fval = fields[number].strip() if number < len(fields) else ''
if not fval:
fval = parser.field_default.get(field, '')
@@ -64,7 +64,7 @@ def __init__(self, parser, fields=None):
self.dec_rad = parser.getAngle(self, 'Dec', 'dech', 'decd', 'decm', 'decs')
else:
# else make empty line
- for field in parser.field_number.iterkeys():
+ for field in parser.field_number.keys():
setattr(self, field, '')
def setPosition(self, ra, dec):
@@ -77,13 +77,13 @@ def makeStr(self):
"""Converts into a string using the designated parser"""
# build up dict of valid fields
fields = {}
- for field, num in self._parser.field_number.iteritems():
+ for field, num in self._parser.field_number.items():
value = getattr(self, field, None)
if value:
fields[num] = value
# output
output = ""
- nfields = max(fields.iterkeys()) + 1
+ nfields = max(fields.keys()) + 1
for i in range(nfields):
sep = self._parser.separators[i] if i < nfields - 1 else ''
output += "%s%s" % (fields.get(i, ''), sep)
@@ -160,7 +160,7 @@ def getAngle(self, catline, field, fh, fd, fm, fs):
else:
match = re.match('([+-]?\s*\d+).(\d+).(.*)$', fstr)
if not match:
- raise ValueError, "invalid direction '%s'" % fstr
+ raise ValueError("invalid direction '%s'" % fstr)
d, m, s = match.groups()
else:
if self.defines(fh):
@@ -215,7 +215,7 @@ def load(filename, freq0=None, center_on_brightest=False, **kw):
line0 = ff.readline().strip()
match = re.match("#\s*\((.+)\)\s*=\s*format", line0)
if not match:
- raise ValueError, "line 1 is not a valid format specification"
+ raise ValueError("line 1 is not a valid format specification")
format_str = match.group(1)
# create format parser from this string
parser = CatalogParser(format_str)
@@ -223,7 +223,7 @@ def load(filename, freq0=None, center_on_brightest=False, **kw):
# check for mandatory fields
for field in "Name", "Type":
if not parser.defines(field):
- raise ValueError, "Table lacks mandatory field '%s'" % field
+ raise ValueError("Table lacks mandatory field '%s'" % field)
maxbright = 0
patches = []
@@ -251,7 +251,7 @@ def load(filename, freq0=None, center_on_brightest=False, **kw):
# check source type
stype = catline.Type.upper()
if stype not in ("POINT", "GAUSSIAN"):
- raise ValueError, "unsupported source type %s" % stype
+ raise ValueError("unsupported source type %s" % stype)
# see if we have freq0
if freq0:
f0 = freq0
@@ -339,7 +339,7 @@ def save(model, filename, sources=None, format=None, **kw):
# check for mandatory fields
for field in "Name", "Type":
if not parser.defines(field):
- raise ValueError, "Output format lacks mandatory field '%s'" % field
+ raise ValueError("Output format lacks mandatory field '%s'" % field)
# open file
ff = open(filename, mode="wt")
ff.write("# (%s) = format\n# The above line defines the field order and is required.\n\n" % format)
diff --git a/Tigger/Models/Formats/ModelHTML.py b/Tigger/Models/Formats/ModelHTML.py
index ac95104..c83964a 100644
--- a/Tigger/Models/Formats/ModelHTML.py
+++ b/Tigger/Models/Formats/ModelHTML.py
@@ -26,7 +26,7 @@
import time
import traceback
-from HTMLParser import HTMLParser
+from html.parser import HTMLParser
import Kittens.utils
@@ -89,7 +89,7 @@ def load(filename, **kw):
parser.feed(line)
parser.close()
if not parser.toplevel_objects:
- raise RuntimeError, "failed to load sky model from file %s" % filename
+ raise RuntimeError("failed to load sky model from file %s" % filename)
return parser.toplevel_objects[0]
diff --git a/Tigger/Models/Formats/NEWSTAR.py b/Tigger/Models/Formats/NEWSTAR.py
index 548c8e6..ad98b3f 100644
--- a/Tigger/Models/Formats/NEWSTAR.py
+++ b/Tigger/Models/Formats/NEWSTAR.py
@@ -152,7 +152,7 @@ def load(filename, import_src=True, import_cc=True, min_extent=0, **kw):
## temp dict to hold unique nodenames
unamedict = {}
### Models -- 56 bytes
- for ii in xrange(0, nsources):
+ for ii in range(0, nsources):
mdl = numpy.fromfile(ff, dtype=numpy.uint8, count=56)
### source parameters
@@ -188,7 +188,7 @@ def load(filename, import_src=True, import_cc=True, min_extent=0, **kw):
# NEWSTAR MDL lists might have same source twice if they are
# clean components, so make a unique name for them
bname = 'N' + str(id)
- if unamedict.has_key(bname):
+ if bname in unamedict:
uniqname = bname + '_' + str(unamedict[bname])
unamedict[bname] += 1
else:
diff --git a/Tigger/Models/Formats/PyBDSMGaul.py b/Tigger/Models/Formats/PyBDSMGaul.py
index 3742866..b2c8db4 100644
--- a/Tigger/Models/Formats/PyBDSMGaul.py
+++ b/Tigger/Models/Formats/PyBDSMGaul.py
@@ -85,7 +85,7 @@ def load(filename, freq0=None, **kw):
if format and freq0:
break
if not format:
- raise ValueError, "this .gaul file does not appear to contain a format string"
+ raise ValueError("this .gaul file does not appear to contain a format string")
# call ASCII.load() function now that we have the format dict
kw['format'] = format
return ASCII.load(filename, **kw)
diff --git a/Tigger/Models/Formats/__init__.py b/Tigger/Models/Formats/__init__.py
index ea9e3a9..53a3713 100644
--- a/Tigger/Models/Formats/__init__.py
+++ b/Tigger/Models/Formats/__init__.py
@@ -46,7 +46,7 @@ def _initFormats():
__import__(format, globals(), locals())
except:
traceback.print_exc()
- print "Error loading support for format '%s', see above. Format will not be available." % format
+ print("Error loading support for format '%s', see above. Format will not be available." % format)
_FormatsInitialized = True
@@ -78,7 +78,7 @@ def getFormatExtensions(name):
def determineFormat(filename):
"""Tries to determine file format by filename. Returns name,import_func,export_func,docstring if found, None,None,None,None otherwise."""
_initFormats()
- for name, (import_func, export_func, doc, extensions) in Formats.iteritems():
+ for name, (import_func, export_func, doc, extensions) in Formats.items():
for ext in extensions:
if filename.endswith(ext):
return name, import_func, export_func, doc
@@ -117,7 +117,7 @@ def load(filename, format=None, verbose=True):
if not import_func:
raise TypeError("Unknown model format '%s'" % format)
if verbose:
- print "Loading %s: %s" % (filename, doc)
+ print("Loading %s: %s" % (filename, doc))
return import_func(filename)
@@ -125,5 +125,5 @@ def save(model, filename, format=None, verbose=True):
"""Saves a sky model."""
name, import_func, export_func, doc = resolveFormat(filename, format)
if verbose:
- print "Saving %s: %s" % (filename, doc)
+ print("Saving %s: %s" % (filename, doc))
return export_func(model, filename)
diff --git a/Tigger/Models/ModelClasses.py b/Tigger/Models/ModelClasses.py
index c0b8111..cbbaeaa 100644
--- a/Tigger/Models/ModelClasses.py
+++ b/Tigger/Models/ModelClasses.py
@@ -73,33 +73,33 @@ def __init__(self, *args, **kws):
mandatory attributes, and its keyword arguments as optional attributes"""
# check for argument errors
if len(args) < len(self.mandatory_attrs):
- raise TypeError, "too few arguments in constructor of " + self.__class__.__name__
+ raise TypeError("too few arguments in constructor of " + self.__class__.__name__)
if len(args) > len(self.mandatory_attrs):
- raise TypeError, "too many arguments in constructor of " + self.__class__.__name__
+ raise TypeError("too many arguments in constructor of " + self.__class__.__name__)
# set mandatory attributes from argument list
for attr, value in zip(self.mandatory_attrs, args):
if not isinstance(value, AllowedTypesTuple):
- raise TypeError, "invalid type %s for attribute %s (class %s)" % (
- type(value).__name__, attr, self.__class__.__name__)
+ raise TypeError("invalid type %s for attribute %s (class %s)" % (
+ type(value).__name__, attr, self.__class__.__name__))
setattr(self, attr, value)
# set optional attributes from keywords
- for kw, default in self.optional_attrs.iteritems():
+ for kw, default in self.optional_attrs.items():
value = kws.pop(kw, default)
if not isinstance(value, AllowedTypesTuple):
- raise TypeError, "invalid type %s for attribute %s (class %s)" % (
- type(value).__name__, kw, self.__class__.__name__)
+ raise TypeError("invalid type %s for attribute %s (class %s)" % (
+ type(value).__name__, kw, self.__class__.__name__))
setattr(self, kw, value)
# set extra attributes, if any are left
self._extra_attrs = set()
if self.allow_extra_attrs:
- for kw, value in kws.iteritems():
+ for kw, value in kws.items():
if not isinstance(value, AllowedTypesTuple):
- raise TypeError, "invalid type %s for attribute %s (class %s)" % (
- type(value).__name__, kw, self.__class__.__name__)
+ raise TypeError("invalid type %s for attribute %s (class %s)" % (
+ type(value).__name__, kw, self.__class__.__name__))
self.setAttribute(kw, value)
elif kws:
- raise TypeError, "unknown parameters %s in constructor of %s" % (
- ','.join(kws.keys()), self.__class__.__name__)
+ raise TypeError("unknown parameters %s in constructor of %s" % (
+ ','.join(list(kws.keys())), self.__class__.__name__))
# other init
self._signaller = None
self._connections = set()
@@ -115,7 +115,7 @@ def signalsEnabled(self):
def connect(self, signal_name, receiver, reconnect=False):
"""Connects SIGNAL from object to specified receiver slot. If reconnect is True, allows duplicate connections."""
if not self._signaller:
- raise RuntimeError, "ModelItem.connect() called before enableSignals()"
+ raise RuntimeError("ModelItem.connect() called before enableSignals()")
import PyQt4.Qt
if reconnect or (signal_name, receiver) not in self._connections:
self._connections.add((signal_name, receiver))
@@ -124,16 +124,16 @@ def connect(self, signal_name, receiver, reconnect=False):
def emit(self, signal_name, *args):
"""Emits named SIGNAL from this object ."""
if not self._signaller:
- raise RuntimeError, "ModelItem.emit() called before enableSignals()"
+ raise RuntimeError("ModelItem.emit() called before enableSignals()")
import PyQt4.Qt
self._signaller.emit(PyQt4.Qt.SIGNAL(signal_name), *args)
def registerClass(classobj):
if not isinstance(classobj, type):
- raise TypeError, "registering invalid class object: %s" % classobj
+ raise TypeError("registering invalid class object: %s" % classobj)
globals()[classobj.__name__] = classobj
AllowedTypes[classobj.__name__] = classobj
- AllowedTypesTuple = tuple(AllowedTypes.itervalues())
+ AllowedTypesTuple = tuple(AllowedTypes.values())
registerClass = classmethod(registerClass)
@@ -154,7 +154,7 @@ def getExtraAttributes(self):
def getAttributes(self):
"""Returns list of all attributes (mandatory+optional+extra), as (attr,value) tuples"""
attrs = [(attr, getattr(self, attr)) for attr in self.mandatory_attrs]
- for attr, default in self.optional_attrs.iteritems():
+ for attr, default in self.optional_attrs.items():
val = getattr(self, attr, default)
if val != default:
attrs.append((attr, val))
@@ -216,7 +216,7 @@ def _resolveTags(self, tags, attr=None):
elif isinstance(tags, (list, tuple)):
tag, tags = tags[0], tags[1:]; # stack of tags supplied: use first here, pass rest to sub-items
else:
- raise ValueError, "invalid 'tags' parameter of type " + str(type(tags))
+ raise ValueError("invalid 'tags' parameter of type " + str(type(tags)))
# if tag is None, use default
tag = tag or self.attr_rendertag.get(attr, None) or "A"
if tag.endswith('\n'):
@@ -248,7 +248,7 @@ def renderMarkup(self, tags=None, attrname=None):
for attr in self.mandatory_attrs:
markup += self.renderAttrMarkup(attr, getattr(self, attr), tags=tags, mandatory=True)
# write optional attributes only wheh non-default
- for attr, default in sorted(self.optional_attrs.iteritems()):
+ for attr, default in sorted(self.optional_attrs.items()):
val = getattr(self, attr, default)
if val != default:
markup += self.renderAttrMarkup(attr, val, tags=tags)
@@ -260,11 +260,11 @@ def renderMarkup(self, tags=None, attrname=None):
return markup
numpy_int_types = tuple([
- getattr(numpy, "%s%d" % (t, d)) for t in "int", "uint" for d in 8, 16, 32, 64
+ getattr(numpy, "%s%d" % (t, d)) for t in ("int", "uint") for d in 8, 16, 32, 64
if hasattr(numpy, "%s%d" % (t, d))
])
numpy_float_types = tuple([
- getattr(numpy, "float%d" % d) for d in 32, 64, 96, 128
+ getattr(numpy, "float%d" % d) for d in (32, 64, 96, 128)
if hasattr(numpy, "float%d" % d)
])
@@ -304,7 +304,7 @@ def renderAttrMarkup(self, attr, value, tags=None, verbose=None, mandatory=False
markup += ">"
if verbose:
markup += comment % (verbose + ":")
- for key, item in sorted(value.iteritems()):
+ for key, item in sorted(value.items()):
markup += self.renderAttrMarkup(key, item, tags=tags)
# render everything else inline
else:
@@ -455,13 +455,13 @@ def strDesc(self, **kw):
startup_dprint(1, "end of class defs")
# populate dict of AllowedTypes with all classes defined so far
-globs = list(globals().iteritems())
+globs = list(globals().items())
-AllowedTypes = dict(AtomicTypes.iteritems())
+AllowedTypes = dict(iter(AtomicTypes.items()))
AllowedTypes['NoneType'] = type(None); # this must be a type, otherwise isinstance() doesn't work
for name, val in globs:
if isinstance(val, type):
AllowedTypes[name] = val
-AllowedTypesTuple = tuple(AllowedTypes.itervalues())
+AllowedTypesTuple = tuple(AllowedTypes.values())
startup_dprint(1, "end of ModelClasses")
diff --git a/Tigger/Models/PlotStyles.py b/Tigger/Models/PlotStyles.py
index 956dd3c..654f861 100644
--- a/Tigger/Models/PlotStyles.py
+++ b/Tigger/Models/PlotStyles.py
@@ -26,7 +26,7 @@
import math
-import ModelClasses
+from . import ModelClasses
# string used to indicate default value of an attribute
DefaultValue = "default"
@@ -80,10 +80,10 @@ class PlotStyle(ModelClasses.ModelItem):
def copy(self):
return PlotStyle(
- **dict([(attr, getattr(self, attr, default)) for attr, default in DefaultPlotAttrs.iteritems()]))
+ **dict([(attr, getattr(self, attr, default)) for attr, default in DefaultPlotAttrs.items()]))
def update(self, other):
- for attr in DefaultPlotAttrs.iterkeys():
+ for attr in DefaultPlotAttrs.keys():
val = getattr(other, attr, None)
if val is not None and val != DefaultValue:
setattr(self, attr, val)
@@ -131,7 +131,7 @@ def makeSourceLabel(label, src):
return ""
global _label_keys
lbl = label
- for key, func in _label_keys.iteritems():
+ for key, func in _label_keys.items():
if lbl.find(key) >= 0:
lbl = lbl.replace(key, func(src))
return lbl
diff --git a/Tigger/Models/SkyModel.py b/Tigger/Models/SkyModel.py
index 6dd94dd..652f6c4 100644
--- a/Tigger/Models/SkyModel.py
+++ b/Tigger/Models/SkyModel.py
@@ -26,9 +26,10 @@
import re
-import PlotStyles
-from ModelClasses import ModelItem
+from . import PlotStyles
+from .ModelClasses import ModelItem
from Tigger.Coordinates import angular_dist_pos_angle, DEG
+from functools import reduce
class ModelTag(ModelItem):
@@ -53,7 +54,7 @@ def get(self, tagname):
return self.tags.setdefault(tagname, ModelTag(tagname))
def getAll(self):
- all = self.tags.values()
+ all = list(self.tags.values())
all.sort(lambda a, b: cmp(a.name, b.name))
return all
@@ -72,7 +73,7 @@ def renderMarkup(self, tag="A", attrname=None):
markup += "mdlattr=%s " % attrname
markup += ">"
# write mandatory attributes
- for name, tt in self.tags.iteritems():
+ for name, tt in self.tags.items():
markup += self.renderAttrMarkup(name, tt, tag="TR", mandatory=True)
# closing tag
markup += "%s>" % tag
@@ -137,7 +138,7 @@ def __init__(self, name, func, style=PlotStyles.DefaultPlotStyle, sources=None):
self.computeTotal(sources)
def computeTotal(self, sources):
- self.total = len(filter(self.func, sources))
+ self.total = len(list(filter(self.func, sources)))
return self.total
@@ -318,7 +319,7 @@ def initGroupings(self):
def _remakeGroupList(self):
self.groupings = [self.defgroup, self.curgroup, self.selgroup]
- typenames = self._typegroups.keys()
+ typenames = list(self._typegroups.keys())
typenames.sort()
self.groupings += [self._typegroups[name] for name in typenames]
self.groupings += [self._taggroups[name] for name in self.tagnames]
@@ -411,7 +412,7 @@ def fieldCenter(self):
def save(self, filename, format=None, verbose=True):
"""Convenience function, saves model to file. Format may be specified explicitly, or determined from filename."""
- import Formats
+ from . import Formats
Formats.save(self, filename, format=format, verbose=verbose)
_re_bynumber = re.compile("^([!-])?(\\d+)?:(\\d+)?$")
diff --git a/Tigger/SiameseInterface.py b/Tigger/SiameseInterface.py
index 4f3617e..abaa712 100644
--- a/Tigger/SiameseInterface.py
+++ b/Tigger/SiameseInterface.py
@@ -204,7 +204,7 @@ def source_list(self, ns, max_sources=None, **kw):
# If source is solvable and this particular attribute is solvable, replace
# value in attrs dict with a Meq.Parm.
if solvable:
- for parmname, value in attrs.items():
+ for parmname, value in list(attrs.items()):
sgname = _Subgroups.get(parmname, None)
if sgname in subgroups:
solvable = True
diff --git a/Tigger/Tools/FITSHeaders.py b/Tigger/Tools/FITSHeaders.py
index 4c426bd..cda0148 100644
--- a/Tigger/Tools/FITSHeaders.py
+++ b/Tigger/Tools/FITSHeaders.py
@@ -12,11 +12,11 @@
def isAxisTypeX(ctype):
"""Checks if given CTYPE corresponds to the X axis"""
- return any([ctype.startswith(prefix) for prefix in "RA", "GLON", "ELON", "HLON", "SLON"]) or \
+ return any([ctype.startswith(prefix) for prefix in ("RA", "GLON", "ELON", "HLON", "SLON")]) or \
ctype in ("L", "X", "LL", "U", "UU")
def isAxisTypeY(ctype):
"""Checks if given CTYPE corresponds to the Y axis"""
- return any([ctype.startswith(prefix) for prefix in "DEC", "GLAT", "ELAT", "HLAT", "SLAT"]) or \
+ return any([ctype.startswith(prefix) for prefix in ("DEC", "GLAT", "ELAT", "HLAT", "SLAT")]) or \
ctype in ("M", "Y", "MM", "V", "VV")
diff --git a/Tigger/Tools/Imaging.py b/Tigger/Tools/Imaging.py
index c5e55e4..6b61b83 100644
--- a/Tigger/Tools/Imaging.py
+++ b/Tigger/Tools/Imaging.py
@@ -37,7 +37,7 @@
from scipy.ndimage.filters import convolve
from scipy.ndimage.interpolation import map_coordinates
-import FITSHeaders
+from . import FITSHeaders
from Tigger.Coordinates import Projection
_verbosity = Kittens.utils.verbosity(name="imaging")
@@ -68,7 +68,7 @@ def fitPsf(filename, cropsize=None):
elif len(psf.shape) == 3:
psf = psf[0, :, :]
else:
- raise RuntimeError, "illegal PSF shape %s" + psf.shape
+ raise RuntimeError("illegal PSF shape %s" + psf.shape)
nx, ny = psf.shape
# crop the central region
if cropsize:
@@ -83,14 +83,14 @@ def fitPsf(filename, cropsize=None):
iy = numpy.where(psf[nx // 2, :] < 0)[0]
iy0 = max(iy[iy < ny // 2])
iy1 = min(iy[iy > ny // 2])
- print ix0, ix1, iy0, iy1
+ print(ix0, ix1, iy0, iy1)
psf = psf[ix0:ix1, iy0:iy1]
psf[psf < 0] = 0
# estimate gaussian parameters, then fit
- import gaussfitter2
+ from . import gaussfitter2
parms0 = gaussfitter2.moments(psf, circle=0, rotate=1, vheight=0)
- print parms0
+ print(parms0)
dprint(2, "Estimated parameters are", parms0)
parms = gaussfitter2.gaussfit(psf, None, parms0, autoderiv=1, return_all=0, circle=0, rotate=1, vheight=0)
dprint(0, "Fitted parameters are", parms)
@@ -121,7 +121,7 @@ def convolveGaussian(x1, y1, p1, x2, y2, p2):
with another Gaussian given by x2,y2,p2, and returns the extents
and angle of the resulting Gaussian."""
# convert to Fourier plane extents, FT transforms a -> pi^2/a
- u1, v1, u2, v2 = [(math.pi ** 2) * 2 * a ** 2 for a in x1, y1, x2, y2]
+ u1, v1, u2, v2 = [(math.pi ** 2) * 2 * a ** 2 for a in (x1, y1, x2, y2)]
# print "uv coeffs",u1,v1,u2,v2
c1, s1 = math.cos(p1), math.sin(p1)
c2, s2 = math.cos(p2), math.sin(p2)
@@ -198,12 +198,12 @@ def match_ctype(ctype, ctype_list):
iy = iax
elif ctype == 'STOKES':
if istokes is not None:
- raise ValueError, "duplicate STOKES axis in FITS file %s" % filename
+ raise ValueError("duplicate STOKES axis in FITS file %s" % filename)
istokes = iax
crval = hdr.get('CRVAL' + axs, 0)
cdelt = hdr.get('CDELT' + axs, 1)
crpix = hdr.get('CRPIX' + axs, 1) - 1
- values = map(int, list(crval + (numpy.arange(data.shape[iax]) - crpix) * cdelt))
+ values = list(map(int, list(crval + (numpy.arange(data.shape[iax]) - crpix) * cdelt)))
stokes_names = [(FITSHeaders.StokesNames[i]
if i > 0 and i < len(FITSHeaders.StokesNames) else "%d" % i) for i in values]
else:
@@ -211,7 +211,7 @@ def match_ctype(ctype, ctype_list):
other_axes_ctype.append(ctype)
# not found?
if ix is None or iy is None:
- raise ValueError, "FITS file %s does not appear to contain an X and/or Y axis" % filename
+ raise ValueError("FITS file %s does not appear to contain an X and/or Y axis" % filename)
# form up shape of resulting image, and order of axes for transpose
shape = [data.shape[ix], data.shape[iy]]
axes = [ix, iy]
@@ -490,7 +490,7 @@ def make_axis_indexer(n, elem_index=numpy.newaxis):
# model_stp will be [0,-1,-1,1]
model_stp = [(model_stokes.index(st) if st in model_stokes else -1) for st in stokes]
if model_stp[0] < 0:
- print "Warning: model image %s lacks Stokes %s, skipping." % (src.shape.filename, model_stokes[0])
+ print("Warning: model image %s lacks Stokes %s, skipping." % (src.shape.filename, model_stokes[0]))
continue
# figure out whether the images overlap at all
# in the trivial case, both images have the same WCS, so no resampling is needed
@@ -513,8 +513,8 @@ def make_axis_indexer(n, elem_index=numpy.newaxis):
continue
# warn about ignored model axes (e.g. when model has frequency and our output doesn't)
if removed_model_axes:
- print "Warning: model image %s has one or more axes that are not present in the output image:" % src.shape.filename
- print " taking the first plane along (%s)." % (",".join(removed_model_axes))
+ print("Warning: model image %s has one or more axes that are not present in the output image:" % src.shape.filename)
+ print(" taking the first plane along (%s)." % (",".join(removed_model_axes)))
# evaluate convolution kernel for this model scale, if not already cached
conv_kernel = conv_kernels.get((modelproj.xscale, modelproj.yscale), None)
if conv_kernel is None:
@@ -557,8 +557,8 @@ def make_axis_indexer(n, elem_index=numpy.newaxis):
model_indices = indices
# else error
else:
- raise RuntimeError, "axis %s of model image %s doesn't match that of output image" % \
- (extra_data_axes[axis - 3], src.shape.filename)
+ raise RuntimeError("axis %s of model image %s doesn't match that of output image" % \
+ (extra_data_axes[axis - 3], src.shape.filename))
# update list of slices
slices = [(sd0 + sd, si0 + si) for sd0, si0 in slices for sd, si in zip(indices, model_indices)]
# now loop over slices and assign
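Several hunks above (and in the patches below) wrap map() in list(). In Python 3 map() returns a lazy iterator rather than a list, so any result that is indexed, sliced or iterated more than once needs the explicit conversion. A standalone sketch with made-up axis values:

    import numpy

    crval, cdelt, crpix, n = 1400.0, 10.0, 0.0, 4
    values = map(int, crval + (numpy.arange(n) - crpix) * cdelt)
    # Python 2: already a list; Python 3: a one-shot iterator.
    values = list(values)          # safe on both versions, and indexable
    print(values[0], values[-1])   # 1400 1430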
From 23c33f0580495fde965a9688d3dc7f63e780ecc9 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 6 Apr 2018 11:39:08 +0200
Subject: [PATCH 07/13] run more binaries as test
---
Dockerfile | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/Dockerfile b/Dockerfile
index 18cf456..496bb76 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,12 @@
FROM kernsuite/base:3
RUN docker-apt-install python-pip
RUN docker-apt-install python-setuptools python-numpy python-scipy python-astropy python-kittens
+RUN pip install astro-kittens astlib
ADD . /code
RUN pip install /code
RUN /usr/local/bin/tigger-convert /code/test/3C147-HI6.refmodel.lsm /tmp/output.txt
+RUN /usr/local/bin/tigger-make-brick /code/test/3C147-HI6.refmodel.lsm /tmp/brick
+RUN /usr/local/bin/tigger-tag /code/test/3C147-HI6.refmodel.lsm gijs
+RUN /usr/local/bin/tigger-restore
RUN echo "the next command should not print 1"
RUN wc -l /tmp/output.txt
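The Dockerfile lines above run the command-line tools against the test model as a crude smoke test. For local runs outside Docker the same checks could be scripted; a hypothetical sketch using subprocess, with the paths as used in the image:

    import subprocess

    # Hypothetical local equivalent of the Dockerfile RUN lines above.
    for cmd in (
        ["tigger-convert", "test/3C147-HI6.refmodel.lsm", "/tmp/output.txt"],
        ["tigger-make-brick", "test/3C147-HI6.refmodel.lsm", "/tmp/brick"],
        ["tigger-tag", "test/3C147-HI6.refmodel.lsm", "gijs"],
    ):
        subprocess.check_call(cmd)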
From b6517e51775ba5e7743c6cd52663191700b3fbb0 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 6 Apr 2018 14:56:08 +0200
Subject: [PATCH 08/13] be more ignorant
---
.gitignore | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.gitignore b/.gitignore
index 55532e6..32c273d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,5 +4,6 @@
debian/
build/
MANIFEST
-astro_tigger.egg-info/
+*.egg-info/
+.venv*/
dist/
From 0161f2188e372bdbd8ca03b17b328950d831ae43 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 4 May 2018 14:19:24 +0200
Subject: [PATCH 09/13] fix refactor error
---
Tigger/SiameseInterface.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Tigger/SiameseInterface.py b/Tigger/SiameseInterface.py
index abaa712..0c22330 100644
--- a/Tigger/SiameseInterface.py
+++ b/Tigger/SiameseInterface.py
@@ -76,7 +76,7 @@ def compile_options(self):
"""Returns list of compile-time options"""
if not self._compile_opts:
self._compile_opts = [
- TDLRuntimeOptions("filename", "Tigger LSM file",
+ TDLOption("filename", "Tigger LSM file",
TDLFileSelect("Tigger models (*." + ModelHTML.DefaultExtension + ");;All files (*)",
default=self.filename, exist=True),
namespace=self),
From d520da628a7685c6744b7e62e13f6f4887996651 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 11 May 2018 10:11:59 +0200
Subject: [PATCH 10/13] fix issue #6
---
setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.py b/setup.py
index b2df9de..10a0280 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
__version__ = "1.4.2"
-requirements = ['astro_kittens', 'numpy', 'scipy', 'astlib', 'astropy']
+requirements = ['astro_kittens', 'numpy', 'scipy', 'astlib', 'astropy', 'future']
scripts = [
'Tigger/bin/tigger-convert',
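The new 'future' requirement provides Python 2/3 compatibility shims such as past.builtins, which a later patch imports in tigger-restore. A minimal illustration, assuming the future package is installed:

    from past.builtins import cmp   # shipped by the 'future' package

    print(cmp(1, 2), cmp(2, 2), cmp(3, 2))   # -1 0 1, like the Python 2 built-in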
From 542c9dd44d359177fdf4bc69e5e554082d7d9c8e Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 3 Aug 2018 10:50:19 +0200
Subject: [PATCH 11/13] more py3 issues
---
Tigger/Models/ModelClasses.py | 2 +-
Tigger/bin/tigger-convert | 161 +++++++++++++++++-----------------
2 files changed, 82 insertions(+), 81 deletions(-)
diff --git a/Tigger/Models/ModelClasses.py b/Tigger/Models/ModelClasses.py
index cbbaeaa..a2b04bd 100644
--- a/Tigger/Models/ModelClasses.py
+++ b/Tigger/Models/ModelClasses.py
@@ -260,7 +260,7 @@ def renderMarkup(self, tags=None, attrname=None):
return markup
numpy_int_types = tuple([
- getattr(numpy, "%s%d" % (t, d)) for t in ("int", "uint") for d in 8, 16, 32, 64
+ getattr(numpy, "%s%d" % (t, d)) for t in ("int", "uint") for d in (8, 16, 32, 64)
if hasattr(numpy, "%s%d" % (t, d))
])
numpy_float_types = tuple([
diff --git a/Tigger/bin/tigger-convert b/Tigger/bin/tigger-convert
index 51fb58e..19d41d4 100755
--- a/Tigger/bin/tigger-convert
+++ b/Tigger/bin/tigger-convert
@@ -99,14 +99,15 @@ if __name__ == '__main__':
break
dirname = os.path.dirname(dirname)
else:
- print "Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH." % os.path.realpath(
- __file__)
+ print(("Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation"
+ "and/or PYTHONPATH." % os.path.realpath( __file__)))
sys.exit(1)
sys.path.append(os.path.dirname(dirname))
try:
import Tigger
except:
- print "Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH." % dirname
+ print(("Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH." %
+ dirname))
sys.exit(1)
# some things can implicitly invoke matplotlib, which can cry when no X11 is around
@@ -280,7 +281,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
min_extent = (options.min_extent / 3600) * DEG
if options.help_format:
- print ASCII.FormatHelp
+ print((ASCII.FormatHelp))
sys.exit(0)
# get filenames
@@ -310,8 +311,8 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
import pyrap.quanta
except:
traceback.print_exc()
- print "Failed to import pyrap.measures, which is required by one of the options you specified."
- print "You probably need to install the 'pyrap' package for this to work."
+ print("Failed to import pyrap.measures, which is required by one of the options you specified.")
+ print("You probably need to install the 'pyrap' package for this to work.")
sys.exit(1)
measures_dmdq = pyrap.measures.measures(), pyrap.quanta
return measures_dmdq
@@ -331,7 +332,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
qq = dm.get_value(coord_dir)
return [q.get_value('rad') for q in qq]
except:
- print "Error parsing or converting coordinate string '%s', see traceback:" % coords
+ print("Error parsing or converting coordinate string '%s', see traceback:" % coords)
traceback.print_exc()
sys.exit(1)
@@ -366,7 +367,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
selections = []
for selstr in (options.select or []):
match = re.match("^(?i)([^=<>!.]+)(%s)([^dms]+)([dms])?" % "|".join(
- [key.replace('.', '\.') for key in select_predicates.keys()]), selstr)
+ [key.replace('.', '\.') for key in list(select_predicates.keys())]), selstr)
if not match:
parser.error("Malformed --select string '%s'." % selstr)
try:
@@ -381,7 +382,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
input_type, import_func, dum, input_doc = Tigger.Models.Formats.resolveFormat(skymodel,
options.type if options.type != AUTO else None)
except:
- print "Unable to determine model type for %s, please specify one explicitly with the -t/--type option." % skymodel
+ print("Unable to determine model type for %s, please specify one explicitly with the -t/--type option." % skymodel)
sys.exit(1)
# figure out output type, if explicitly specified
@@ -393,13 +394,13 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
output_type, dum, export_func, output_doc = Tigger.Models.Formats.getFormat(options.output_type)
output_extensions = Tigger.Models.Formats.getFormatExtensions(options.output_type)
if not export_func or not extensions:
- print "Output model type '%s' is not supported." % options.output_type
+ print("Output model type '%s' is not supported." % options.output_type)
sys.exit(1)
# figure out output name, if not specified
if output is None:
if not output_type:
- print "An output filename and/or an explicit output model type (-o/--output-type) must be specfified."
+ print("An output filename and/or an explicit output model type (-o/--output-type) must be specfified.")
sys.exit(1)
# get base input name
# if input extension is "lsm.html", then split off two extensions, not just one
@@ -414,30 +415,30 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
except:
export_func = None
if not export_func:
- print "Unable to determine model type for %s, please specify one explicitly with the -o/--output-type option." % output
+ print("Unable to determine model type for %s, please specify one explicitly with the -o/--output-type option." % output)
sys.exit(1)
# check if we need to overwrite
if os.path.exists(output) and not options.force:
- print "Output file %s already exists. Use the -f switch to overwrite." % output
+ print("Output file %s already exists. Use the -f switch to overwrite." % output)
sys.exit(1)
- print "Reading %s (%s)" % (skymodel, input_doc)
+ print("Reading %s (%s)" % (skymodel, input_doc))
# load the model
try:
model = import_func(skymodel, min_extent=min_extent, format=options.format, center=center_radec,
verbose=options.verbose)
- except Exception, exc:
+ except Exception as exc:
if options.verbose:
traceback.print_exc()
- print "Error loading model:", str(exc)
+ print("Error loading model:", str(exc))
sys.exit(1)
sources = model.sources
if not sources:
- print "Input model %s contains no sources" % skymodel
+ print("Input model %s contains no sources" % skymodel)
else:
- print "Model contains %d sources" % len(sources)
+ print("Model contains %d sources" % len(sources))
# append, if specified
if options.append:
@@ -447,9 +448,9 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
append_type, append_func, dum, append_doc = Tigger.Models.Formats.resolveFormat(filename,
options.append_type if options.append_type != AUTO else None)
except:
- print "Unable to determine model type for %s, please specify one explicitly with the --append-type option." % filename
+ print("Unable to determine model type for %s, please specify one explicitly with the --append-type option." % filename)
sys.exit(1)
- print "Reading %s (%s)" % (filename, append_doc)
+ print("Reading %s (%s)" % (filename, append_doc))
# read model to be appended
model2 = append_func(filename, min_extent=min_extent, format=options.append_format or options.format)
if model2.sources:
@@ -460,11 +461,11 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
if options.refresh_r:
for src in model2.sources:
src.setAttribute('r', Coordinates.angular_dist_pos_angle(ra0, dec0, *model.fieldCenter())[0])
- print "Appended %d sources from %s (%s)" % (len(model2.sources), filename, append_doc)
+ print("Appended %d sources from %s (%s)" % (len(model2.sources), filename, append_doc))
# apply center, if specified
if options.center:
- print "Center of field set to %s" % options.center
+ print("Center of field set to %s" % options.center)
model.setFieldCenter(*center_radec)
# apply selection by tag
@@ -475,18 +476,18 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
for tag in tags:
sources = [src for src in sources if getattr(src, tag, False)]
if not sources:
- print "No sources left after selection by tag (-T/--tag) has been applied."
+ print("No sources left after selection by tag (-T/--tag) has been applied.")
sys.exit(0)
- print "Selection by tag (%s) reduces this to %d sources" % (", ".join(options.tags), len(sources))
+ print("Selection by tag (%s) reduces this to %d sources" % (", ".join(options.tags), len(sources)))
# apply selection by NaN
if options.remove_nans:
sources = [src for src in sources if not any([math.isnan(x)
- for x in src.pos.ra, src.pos.dec, src.flux.I])]
+ for x in (src.pos.ra, src.pos.dec, src.flux.I)])]
if not sources:
- print "No sources left after applying --remove-nans."
+ print("No sources left after applying --remove-nans.")
sys.exit(0)
- print "Removing NaN positions and fluxes reduces this to %d sources" % len(sources)
+ print("Removing NaN positions and fluxes reduces this to %d sources" % len(sources))
# remove sources
if options.remove_source:
@@ -498,7 +499,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
patt = patt[1:-1]
match = fnmatch.filter([src.name for src in sources], patt.replace("\\", ""))
remove_names.update(match)
- print "Removing sources: %s matches %s" % (patt, ",".join(sorted(match)))
+ print("Removing sources: %s matches %s" % (patt, ",".join(sorted(match))))
sources = [src for src in sources if src.name not in remove_names]
# add brick
@@ -514,7 +515,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
except:
parser.error("Invalid --add-brick setting %s" % brickspec)
if [src.name for src in sources if src.name == name]:
- print "Error: model already contains a source named '%s'" % name
+ print("Error: model already contains a source named '%s'" % name)
# add brick
from astropy.io import fits as pyfits
from astLib.astWCS import WCS
@@ -553,7 +554,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
if not options.refresh_r:
source.setAttribute('r', Coordinates.angular_dist_pos_angle(ra0, dec0, *model.fieldCenter())[0])
sources.append(source)
- print "Adding FITS source %s (%s,pad=%f) with tags %s" % (srcname, fitsfile, pad, tags)
+ print("Adding FITS source %s (%s,pad=%f) with tags %s" % (srcname, fitsfile, pad, tags))
# convert apparent flux to intrinsic using the NEWSTAR beam gain
if options.newstar_app_to_int:
@@ -568,9 +569,9 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
src.removeAttribute('flux_apparent')
src.setAttribute('flux_intrinsic', True)
nsrc += 1
- print "Converted NEWSTAR apparent to intrinsic flux for %d model sources" % nsrc
+ print("Converted NEWSTAR apparent to intrinsic flux for %d model sources" % nsrc)
if len(sources) != nsrc:
- print " (%d sources were skipped for whatever reason.)" % (len(model.sources) - nsrc)
+ print(" (%d sources were skipped for whatever reason.)" % (len(model.sources) - nsrc))
elif options.newstar_int_to_app:
nsrc = 0
for src in sources:
@@ -583,18 +584,18 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
src.removeAttribute('flux_intrinsic')
src.setAttribute('flux_apparent', True)
nsrc += 1
- print "Converted NEWSTAR apparent to intrinsic flux for %d model sources" % nsrc
+ print("Converted NEWSTAR apparent to intrinsic flux for %d model sources" % nsrc)
if len(sources) != nsrc:
- print " (%d sources were skipped for whatever reason.)" % (len(model.sources) - nsrc)
+ print(" (%d sources were skipped for whatever reason.)" % (len(model.sources) - nsrc))
# set refrence frequency
if options.ref_freq >= 0:
model.setRefFreq(options.ref_freq * 1e+6)
- print "Setting reference frequency to %f MHz" % options.ref_freq
+ print("Setting reference frequency to %f MHz" % options.ref_freq)
# recenter
if options.recenter:
- print "Shifting model to new center %s" % options.recenter
+ print("Shifting model to new center %s" % options.recenter)
ra0, dec0 = model.fieldCenter()
field_center = ra1, dec1 = recenter_radec
ddec = dec1 - dec0
@@ -612,7 +613,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
# recompute radial distance
if options.refresh_r:
- print "Recomputing the 'r' attribute based on the field center"
+ print("Recomputing the 'r' attribute based on the field center")
model.recomputeRadialDistance()
@@ -629,7 +630,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
# get tag value
srctag = [(src, getTagValue(src, tag)) for src in model.sources]
sources = [src for src, tag in srctag if tag is not None and predicate(tag, value)]
- print "Selection '%s' leaves %d out of %d sources" % (selstr, len(sources), len(model.sources))
+ print("Selection '%s' leaves %d out of %d sources" % (selstr, len(sources), len(model.sources)))
if len(sources) != len(model.sources):
model.setSources(sources)
@@ -638,15 +639,15 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
if pb == "refresh":
pb = model.primaryBeam()
if pb:
- print "Recalculating apparent fluxes"
+ print("Recalculating apparent fluxes")
else:
- print "No primary beam expression in model, ignoring '--primary-beam refresh' option"
+ print("No primary beam expression in model, ignoring '--primary-beam refresh' option")
if options.app_to_int or options.int_to_app:
pb = pb or model.primaryBeam()
if pb:
- print "Converting apparent fluxes to intrinsic" if options.app_to_int else "Converting intrinsic fluxes to apparent"
+ print("Converting apparent fluxes to intrinsic" if options.app_to_int else "Converting intrinsic fluxes to apparent")
else:
- print "No primary beam expression in model and no --primary-beam option given, cannot convert between apparent and intrinsic."
+ print("No primary beam expression in model and no --primary-beam option given, cannot convert between apparent and intrinsic.")
sys.exit(1)
if pb:
fitsBeam = False
@@ -685,12 +686,12 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
vbs = []
for icorr, corr in enumerate(CORRS_XY if options.linear_pol else CORRS_RL):
if icorr in (1, 2):
- print ' omitting %s beam due to --beam-diag' % corr
+ print(' omitting %s beam due to --beam-diag' % corr)
vbs.append(0)
else:
# make FITS images or nulls for real and imaginary part
filenames = [make_beam_filename(pb, corr, 're'), make_beam_filename(pb, corr, 'im')]
- print 'Loading FITS Beams', filenames[0], filenames[1]
+ print('Loading FITS Beams', filenames[0], filenames[1])
vb = InterpolatedBeams.LMVoltageBeam(verbose=(options.verbose or 0) - 2, l_axis=options.fits_l_axis,
m_axis=options.fits_m_axis)
vb.read(*filenames)
@@ -700,22 +701,22 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
# get frequency
# fq = model.refFreq() or 1.4e+9
beamRefFreq = (options.beam_freq or 0) * 1e+6 or model.refFreq() or 1424500000.12
- print "Using FITS beams with reference frequency %f MHz" % (beamRefFreq * 1e-6)
+ print("Using FITS beams with reference frequency %f MHz" % (beamRefFreq * 1e-6))
else: # else, assume pb is an expession
try:
pbexp = eval('lambda r,fq:' + pb)
dum = pbexp(0, 1e+9); # evaluate at r=0 and 1 GHz as a test
if not isinstance(dum, float):
- raise TypeError, "does not evaluate to a float"
- except Exception, exc:
- print "Bad primary beam expression '%s': %s" % (pb, str(exc))
+ raise TypeError("does not evaluate to a float")
+ except Exception as exc:
+ print("Bad primary beam expression '%s': %s" % (pb, str(exc)))
sys.exit(1)
model.setPrimaryBeam(pb)
# get frequency
# fq = model.refFreq() or 1.4e+9
fq = (options.beam_freq or 0) * 1e+6 or model.refFreq() or 1424500000.12
- print "Using beam expression '%s' with reference frequency %f MHz" % (pb, fq * 1e-6)
+ print("Using beam expression '%s' with reference frequency %f MHz" % (pb, fq * 1e-6))
nsrc = 0
# ensure that every source has an 'r' attribute
@@ -734,7 +735,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
src.setAttribute('beamgain', bg)
src.setAttribute('Iapp', src.flux.I * bg)
nsrc += 1
- print "Applied primary beam expression to %d model sources" % nsrc
+ print("Applied primary beam expression to %d model sources" % nsrc)
else:
# precompute PAs if fitsBeams are used
if fitsBeam:
@@ -764,7 +765,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
pas = []
zenith = dm.direction('AZEL', '0deg', '90deg')
for ms, field in ms_field:
- print "Getting PA range from MS %s, field %d" % (ms, field)
+ print("Getting PA range from MS %s, field %d" % (ms, field))
tab = table(ms)
antpos = table(tab.getkeyword("ANTENNA")).getcol("POSITION")
ra, dec = table(tab.getkeyword("FIELD")).getcol("PHASE_DIR", field, 1)[0][0]
@@ -788,13 +789,13 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
pylab.xlabel("Time since beginning of observation, hours")
pylab.ylabel("PA, degrees")
pylab.savefig(os.path.basename(ms) + ".parangle.png")
- print "Saved plot " + os.path.basename(ms) + ".parangle.png"
- print "MS %s, PA range is %fdeg to %fdeg" % (ms, pa1[0], pa1[-1])
+ print("Saved plot " + os.path.basename(ms) + ".parangle.png")
+ print("MS %s, PA range is %fdeg to %fdeg" % (ms, pa1[0], pa1[-1]))
# get lm's rotated through those ranges
pa_range = numpy.array(pas)
elif options.pa_range is not None:
try:
- ang0, ang1 = map(float, options.pa_range.split(",", 1))
+ ang0, ang1 = list(map(float, options.pa_range.split(",", 1)))
except:
parser.error("Incorrect --pa-range option. FROM,TO values expected.")
pa_range = numpy.arange(ang0, ang1 + 1, 1) * DEG
@@ -803,8 +804,8 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
else:
pa_range = None
if options.verbose:
- print "PA (deg):", " ".join(["%f" % (x / DEG) for x in pa_range]) if numpy.iterable(
- pa_range) else pa_range
+ print("PA (deg):", " ".join(["%f" % (x / DEG) for x in pa_range]) if numpy.iterable(
+ pa_range) else pa_range)
if options.enable_plots:
import pylab
@@ -834,8 +835,8 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
a, b, c, d = [j.mean() for j in jones]
mueller = Jones2Mueller(numpy.matrix([[a, b], [c, d]]))
if options.verbose > 1:
- print "%s: jones11 mean %f std %f" % (src.name, abs(a), abs(jones[0]).std())
- print "%s: jones22 mean %f std %f" % (src.name, abs(d), abs(jones[3]).std())
+ print("%s: jones11 mean %f std %f" % (src.name, abs(a), abs(jones[0]).std()))
+ print("%s: jones22 mean %f std %f" % (src.name, abs(d), abs(jones[3]).std()))
if options.enable_plots:
pylab.plot(abs(jones[0]), label="|J11| " + src.name)
# new-style averaging of Mueller matrix
@@ -846,12 +847,12 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
if options.enable_plots:
pylab.plot([m[0, 0] for m in muellers], label='M11 ' + src.name)
if options.verbose > 1:
- print "%s: jones11 mean %f std %f" % (
- src.name, abs(jones[0].mean()), abs(jones[0]).std())
- print "%s: jones22 mean %f std %f" % (
- src.name, abs(jones[3].mean()), abs(jones[3]).std())
- print "%s: mueller11 mean %f std %f" % (
- src.name, mueller[0, 0], numpy.std([m[0, 0] for m in muellers]))
+ print("%s: jones11 mean %f std %f" % (
+ src.name, abs(jones[0].mean()), abs(jones[0]).std()))
+ print("%s: jones22 mean %f std %f" % (
+ src.name, abs(jones[3].mean()), abs(jones[3]).std()))
+ print("%s: mueller11 mean %f std %f" % (
+ src.name, mueller[0, 0], numpy.std([m[0, 0] for m in muellers])))
bg = mueller[0, 0]
## OMS 6/7/2015: let's do full inversion now to correct all four polarizations
if options.app_to_int:
@@ -876,7 +877,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
iquv0 = numpy.matrix([[getattr(src.flux, stokes, 0.)] for stokes in "IQUV"])
iquv = mueller * iquv0
if options.verbose > 1:
- print "%s: from %s to %s" % (src.name, iquv0.T, iquv.T)
+ print("%s: from %s to %s" % (src.name, iquv0.T, iquv.T))
if options.app_to_int and hasattr(src.flux, "I"):
src.setAttribute("Iapp", src.flux.I)
for i, stokes in enumerate("IQUV"):
@@ -923,7 +924,7 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
ispiVal + spi, beamRefFreq))
if options.verbose:
- print ("%s: beamgain" % src.name), bg, "spi", spi, "clipped" if nobeam else ""
+ print(("%s: beamgain" % src.name), bg, "spi", spi, "clipped" if nobeam else "")
# if spiBg is not None:
# print src.name,repr(freqgrid),repr(spiBg.mean(0))
@@ -940,15 +941,15 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
if options.enable_plots:
pylab.legend()
pylab.savefig("beamgains.png")
- print "Saved plot beamgains.png"
- print "Converted between apparent/intrinsic flux for %d model sources" % nsrc
+ print("Saved plot beamgains.png")
+ print("Converted between apparent/intrinsic flux for %d model sources" % nsrc)
if len(model.sources) != nsrc:
- print " (%d sources were skipped for whatever reason, probably they didn't have an 'r' attribute)" % (
- len(model.sources) - nsrc)
+ print(" (%d sources were skipped for whatever reason, probably they didn't have an 'r' attribute)" % (
+ len(model.sources) - nsrc))
# rename using COPART
if options.rename:
- print "Renaming sources using the COPART convention"
+ print("Renaming sources using the COPART convention")
typecodes = dict(Gau="G", FITS="F")
# sort sources by decreasing flux
sources = sorted(sources, lambda a, b: cmp(b.brightness(), a.brightness()))
@@ -1032,13 +1033,13 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
sources = []
for i, src in enumerate(sources0):
if src.name in names:
- print "Duplicate source '%s' at #%d (first found at #%d), removing" % (src.name, i, names[src.name])
+ print("Duplicate source '%s' at #%d (first found at #%d), removing" % (src.name, i, names[src.name]))
else:
names[src.name] = i
sources.append(src)
# assign prefix to source names
if options.prefix:
- print "Prefixing source names with '%s'" % options.prefix
+ print("Prefixing source names with '%s'" % options.prefix)
for src in sources:
src.name = options.prefix + src.name
# merge clusters
@@ -1050,26 +1051,26 @@ is a Tigger model (-f switch must be specified to allow overwriting), or else a
clusname = getattr(src, 'cluster', '')
clusters.setdefault(clusname, {})[src.name] = src
# unclustered sources copied over as-is
- new_sources = clusters.pop('', {}).values()
+ new_sources = list(clusters.pop('', {}).values())
# next, deal with each cluster
- for clusname, srcdict in clusters.iteritems():
+ for clusname, srcdict in clusters.items():
# leading source has the same name as the cluster
src0 = srcdict.get(clusname)
# if no leading source, or leading source not tagged, or length 1, then copy cluster as-is
if not src0 or len(srcdict) < 2 or (tags is not None and
not any([getattr(src0, tag, None) for tag in tags])):
- new_sources += srcdict.values()
+ new_sources += list(srcdict.values())
else:
# sum fluxes
for x in 'IQUV':
if hasattr(src0.flux, x):
- setattr(src0.flux, x, sum([getattr(s.flux, x, 0) for s in srcdict.itervalues()]))
+ setattr(src0.flux, x, sum([getattr(s.flux, x, 0) for s in srcdict.values()]))
if hasattr(src0, 'Iapp'):
- src0.Iapp = sum([getattr(s, 'Iapp', 0) for s in srcdict.itervalues()])
+ src0.Iapp = sum([getattr(s, 'Iapp', 0) for s in srcdict.values()])
new_sources.append(src0)
- print "Merged cluster %s (%d sources)" % (src0.name, len(srcdict))
+ print("Merged cluster %s (%d sources)" % (src0.name, len(srcdict)))
sources = new_sources
model.setSources(sources)
# save output
- print "Saving model containing %d sources to %s (%s)" % (len(sources), output, output_doc)
+ print("Saving model containing %d sources to %s (%s)" % (len(sources), output, output_doc))
export_func(model, output, sources=sources, format=options.output_format or None)
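Two conversions recur throughout this patch: the Python-3-only "except ... as" syntax, and dictionary views (items()/values() replacing iteritems()/itervalues(), wrapped in list() wherever the result is concatenated or reused). A small self-contained sketch with dummy cluster data:

    clusters = {"A": {"A": 1.0, "A1": 0.5}, "B": {"B": 2.0}}

    new_sources = list(clusters.pop("", {}).values())   # view -> list before +=
    for clusname, srcdict in clusters.items():          # Py3: items(), not iteritems()
        new_sources += list(srcdict.values())

    try:
        total = sum(new_sources)
    except Exception as exc:       # "except Exception, exc" is a SyntaxError on Python 3
        print("Error:", exc)
    else:
        print(total)               # 3.5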
From a52558db908d5c48fa6a96964109119a515e81d1 Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 3 Aug 2018 10:57:43 +0100
Subject: [PATCH 12/13] fix test suite
---
.gitignore | 1 +
Dockerfile | 14 ++++++--------
Tigger/Models/Formats/ASCII.py | 6 +++---
...efmodel.lsm => 3C147-HI6.refmodel.lsm.html} | 0
test/bla.fits | Bin 0 -> 270720 bytes
5 files changed, 10 insertions(+), 11 deletions(-)
rename test/{3C147-HI6.refmodel.lsm => 3C147-HI6.refmodel.lsm.html} (100%)
create mode 100644 test/bla.fits
diff --git a/.gitignore b/.gitignore
index 32c273d..17df08e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ MANIFEST
*.egg-info/
.venv*/
dist/
+test/bla.restored.fits
diff --git a/Dockerfile b/Dockerfile
index 496bb76..d2139fd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,12 +1,10 @@
-FROM kernsuite/base:3
-RUN docker-apt-install python-pip
-RUN docker-apt-install python-setuptools python-numpy python-scipy python-astropy python-kittens
-RUN pip install astro-kittens astlib
+FROM kernsuite/base:4
+RUN docker-apt-install python-setuptools python-numpy python-scipy python-astropy python-astro-kittens python-astlib python-pip
ADD . /code
RUN pip install /code
-RUN /usr/local/bin/tigger-convert /code/test/3C147-HI6.refmodel.lsm /tmp/output.txt
-RUN /usr/local/bin/tigger-make-brick /code/test/3C147-HI6.refmodel.lsm /tmp/brick
-RUN /usr/local/bin/tigger-tag /code/test/3C147-HI6.refmodel.lsm gijs
-RUN /usr/local/bin/tigger-restore
+RUN /usr/local/bin/tigger-convert /code/test/3C147-HI6.refmodel.lsm.html /tmp/output.txt
+RUN /usr/local/bin/tigger-make-brick /code/test/3C147-HI6.refmodel.lsm.html /code/test/bla.fits
+RUN /usr/local/bin/tigger-tag /code/test/3C147-HI6.refmodel.lsm.html gijs
+RUN /usr/local/bin/tigger-restore -f /code/test/bla.fits /code/test/3C147-HI6.refmodel.lsm.html
RUN echo "the next command should not print 1"
RUN wc -l /tmp/output.txt
diff --git a/Tigger/Models/Formats/ASCII.py b/Tigger/Models/Formats/ASCII.py
index 0623082..ff8cdb0 100644
--- a/Tigger/Models/Formats/ASCII.py
+++ b/Tigger/Models/Formats/ASCII.py
@@ -142,7 +142,7 @@ def getval(num, scale=1):
# now process file line-by-line
linenum = 0
format_str = ''
- for line in file(filename):
+ for line in open(filename):
# for the first line, figure out the file format
if not linenum:
if not format and line.startswith("#format:"):
@@ -361,9 +361,9 @@ def getval(num, scale=1):
brightest_name = src.name
radec0 = ra, dec
except:
- if verbose:
- traceback.print_exc()
dprintf(0, "%s:%d: %s, skipping\n", filename, linenum, str(sys.exc_info()[1]))
+ if verbose:
+ raise
dprintf(2, "imported %d sources from file %s\n", len(srclist), filename)
# create model
model = ModelClasses.SkyModel(*srclist)
diff --git a/test/3C147-HI6.refmodel.lsm b/test/3C147-HI6.refmodel.lsm.html
similarity index 100%
rename from test/3C147-HI6.refmodel.lsm
rename to test/3C147-HI6.refmodel.lsm.html
diff --git a/test/bla.fits b/test/bla.fits
new file mode 100644
index 0000000000000000000000000000000000000000..7cbc9286a3e507c8c8a267d57cfc8de33e2b26ca
GIT binary patch
literal 270720
zcmeI$&u<%90S9m{TsU#$f;2)a0@6<6AF*SX%}S2zw(d4bH+Fx)0Zo!gYBjNg?QEO9
za9as+0KtI^7vREye}X>&gv0?Ye+MVTn@P+jtb;-+cYv=e;*?Je~IH
z{d=t_T8^e{-Dn}&>FuS__8=WaTZ8`2V7MQR2hpu|w-b%VNq;*TZl5`ErQN;XempfV
zE|)@H|8RdZ9Y%wlXtOsSMF(MjI~ga@LGMMncP1+TXRYzL-3fbB>$i9&FONxHgqpr)
z%kzp;^GcO!$lFW%yW^)>{Z8_FqbQU=Z_D#a7t9Og&)f36^3=R2)MI-6LizKyJa6%W
zd7=DyTkCPRwRZEIyllMWLnEUbrQ>KnIm(93IO+AH^u;*sk9vdtnf__6udcS%y5ZyU
z6Mt}PFMZM53`4HHpX{blcbN1?2ZP}_>Ku*6>HZI+YzQXfDD=v9y3^~Y+fm4#YmCOj
zL4UA+^hI76oKb%G;OWt*w>6r3Kl)^_cetNM<)!NT(LokpuGD`N#^L7HU^~4Ll?sJo
zZf&fGx2KKg!Yp|Y*4mlgax{1M$-+vj
zu^P>t`t*h9K|h;j$uP~F?gq_gNBN}NOq2an1t+^JtBt$aeyCqzzEB7xOXXUvQmU0}
zOV!tm+sXMazpkvd*G}aX%hh_JQY_T#weVAWx%8X=U%B7N3my9vE7kdWsZuDHi%a!N
zxV~I?YkmBny|(`N)&2-A5B;$a-OYC1K6a)1BpVQVAgH5;8qH0tdi?j_^FaK_a2mAkEG7=R&f!Zc&j
zaeLP{+PAY26Y?H)ntN#yCif|`h3HWn)}5_qz5Z_0%PtYa==rcW4${%)QPk*fhk?Gn
zvoj3uC)s(;tUvPW{oF<)j^j>y?QGsgZ};gqN=9497pExc?}n?-)?gH_EvG(svfJDU
z%iGlTqgblXFn^mH4;%MR=f(B%e6?I%s#L1Q8RkdxX6s%z&x^D9TUb21O`7r6ug&cG
za_qOA?&hbR_IvA`Nx{57^7V4=W~(`+KW?U5d%b=#4hzMb`XbBB`yI(mKE^Zq#J{$-Z+GM^u3^NOWfC5-H_%`)Gz`EfR{QZCnui`9B{
zro3!^9Q)n*^qA(f-|MlayqnLDaQ_vqf1U37N3G7;{+M#c>ksAqk>|ZyZsz{D%Dk*U
zt}-v{k7K_{gD=$|o%^k3=Se5)kM8>Y^=7zNy;R=KMz!diUMw0yj*wQe_8r{qPmT>nD-LSCmkEpL|fBg<=dv-|Ncw&L;X
zHq-i+<*je5-a5&-
zNRzAW!Y9daVXwEju=RXzVKm+?Zf_>F&FxB@ELIld#YtXL+KH1&Qjd%EO0t+F+m*Ch
zSUC9#`_p3KX|XVxUk!IRZ`ksOO>ZdU?dvP>4I2Rh1PBlyK!5-N0t5&UAV7cs0RjXF
z5FkK+009C72oNAZ;6&iV@BH=<27&7)aO2;9|HXByv-%PsK!5-N0t5&UAV7cs0RjXF
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7csfr-F}-`-rC?0Y3ZfB*pk1PBly
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs
z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ
zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U
zAV7cs0RjXF5FkK+009C72wZo8<V-u=Vdlv9QP0RjXF5cp~ZmY@85
z>#MDSPbWZt009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7csfiGL&gWvq=n_u>4$R|L6
z009C7-YtO-zT4h-w`!y|1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7u3ljIpWj-(
z`c#Po2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1l}yL
z{KwzFcylQw2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ;5rE`
zfA-$z*QvhhN`L?X0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U
zAV7cs0RjXF5FkK+009DDZ-M2X{PHhfZvm|*K!5-N0t5&UAV7cs0RjXF5FkK+009C7
z2oNAZfB*pk1PBlyK!5-N0t5)WdjiY<`rhB(y;`Xe0RjXF5FkK+009C72oNAZfB*pk
z1PBlyK!5-N0t5(r@aMlipg@2C0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U
zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAn>ZdhyVG<
ze_uW72mt~F2oNAZfB=EZ2`vBW*T1`5g0l$_AV7cs0RjXF5FkK+009C72oNAZfB*pk
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs
z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ
jfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkL{It%;{tbhuH
literal 0
HcmV?d00001
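The ASCII.py hunk above (and a similar one in ModelHTML.py in the next patch) replaces the file() builtin, which no longer exists in Python 3, with open(). A minimal sketch of the line-by-line reading pattern, using a placeholder filename:

    # "example.lsm.txt" is a placeholder path, not a file shipped with these patches.
    with open("example.lsm.txt") as f:     # file(...) raises NameError on Python 3
        for linenum, line in enumerate(f, 1):
            if line.startswith("#format:"):
                print("format header on line", linenum)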
From 80256a2ec431a5d62cce8684121dfaacda95402d Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Fri, 3 Aug 2018 12:19:24 +0100
Subject: [PATCH 13/13] tests now also work with python3
---
.dockerignore | 1 +
Dockerfile | 30 ++++++++++---
Tigger/Models/Formats/ModelHTML.py | 2 +-
Tigger/Models/Formats/__init__.py | 4 +-
Tigger/Models/SkyModel.py | 1 +
Tigger/bin/tigger-make-brick | 52 +++++++++++------------
Tigger/bin/tigger-restore | 45 ++++++++++----------
Tigger/bin/tigger-tag | 68 +++++++++++++++---------------
setup.py | 2 +-
9 files changed, 114 insertions(+), 91 deletions(-)
diff --git a/.dockerignore b/.dockerignore
index b4806da..1e8b756 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,3 +2,4 @@
.gitignore
.idea/
.venv2/
+.venv3/
diff --git a/Dockerfile b/Dockerfile
index d2139fd..354afcf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,30 @@
FROM kernsuite/base:4
-RUN docker-apt-install python-setuptools python-numpy python-scipy python-astropy python-astro-kittens python-astlib python-pip
+RUN docker-apt-install \
+ python-setuptools \
+ python-numpy \
+ python-scipy \
+ python-astropy \
+ python-astro-kittens \
+ python-astlib \
+ python-pip \
+ python3-setuptools \
+ python3-numpy \
+ python3-scipy \
+ python3-astropy \
+ python3-astlib \
+ python3-pip
+RUN docker-apt-install git
ADD . /code
+RUN pip3 install git+https://github.com/ska-sa/kittens.git@modernize
RUN pip install /code
-RUN /usr/local/bin/tigger-convert /code/test/3C147-HI6.refmodel.lsm.html /tmp/output.txt
-RUN /usr/local/bin/tigger-make-brick /code/test/3C147-HI6.refmodel.lsm.html /code/test/bla.fits
-RUN /usr/local/bin/tigger-tag /code/test/3C147-HI6.refmodel.lsm.html gijs
-RUN /usr/local/bin/tigger-restore -f /code/test/bla.fits /code/test/3C147-HI6.refmodel.lsm.html
+RUN python2 /usr/local/bin/tigger-convert /code/test/3C147-HI6.refmodel.lsm.html /tmp/output.txt
+RUN python2 /usr/local/bin/tigger-make-brick /code/test/3C147-HI6.refmodel.lsm.html /code/test/bla.fits
+RUN python2 /usr/local/bin/tigger-tag /code/test/3C147-HI6.refmodel.lsm.html gijs
+RUN python2 /usr/local/bin/tigger-restore -f /code/test/bla.fits /code/test/3C147-HI6.refmodel.lsm.html
+RUN pip3 install /code
+RUN python3 /usr/local/bin/tigger-convert -f /code/test/3C147-HI6.refmodel.lsm.html /tmp/output.txt
+RUN python3 /usr/local/bin/tigger-make-brick /code/test/3C147-HI6.refmodel.lsm.html /code/test/bla.fits
+RUN python3 /usr/local/bin/tigger-tag /code/test/3C147-HI6.refmodel.lsm.html gijs
+RUN python3 /usr/local/bin/tigger-restore -f /code/test/bla.fits /code/test/3C147-HI6.refmodel.lsm.html
RUN echo "the next command should not print 1"
RUN wc -l /tmp/output.txt
diff --git a/Tigger/Models/Formats/ModelHTML.py b/Tigger/Models/Formats/ModelHTML.py
index c83964a..2c0db97 100644
--- a/Tigger/Models/Formats/ModelHTML.py
+++ b/Tigger/Models/Formats/ModelHTML.py
@@ -85,7 +85,7 @@ def save(model, filename, sources=None, **kw):
def load(filename, **kw):
parser = ModelIndexParser()
parser.reset()
- for line in file(filename):
+ for line in open(filename):
parser.feed(line)
parser.close()
if not parser.toplevel_objects:
diff --git a/Tigger/Models/Formats/__init__.py b/Tigger/Models/Formats/__init__.py
index 53a3713..0362ce4 100644
--- a/Tigger/Models/Formats/__init__.py
+++ b/Tigger/Models/Formats/__init__.py
@@ -23,7 +23,7 @@
# or write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-
+import importlib
import traceback
import Kittens.utils
@@ -43,7 +43,7 @@ def _initFormats():
if not _FormatsInitialized:
for format in ["ModelHTML", "ASCII", "BBS", "NEWSTAR", "AIPSCC", "AIPSCCFITS", "PyBDSMGaul"]:
try:
- __import__(format, globals(), locals())
+ importlib.import_module("Tigger.Models.Formats." + format)
except:
traceback.print_exc()
print("Error loading support for format '%s', see above. Format will not be available." % format)
diff --git a/Tigger/Models/SkyModel.py b/Tigger/Models/SkyModel.py
index 652f6c4..7d5ab53 100644
--- a/Tigger/Models/SkyModel.py
+++ b/Tigger/Models/SkyModel.py
@@ -109,6 +109,7 @@ def brightness(self):
if iapp is not None:
return iapp
else:
+ print(self.flux)
return getattr(self.flux, 'I', 0.)
def get_attr(self, attr, default=None):
diff --git a/Tigger/bin/tigger-make-brick b/Tigger/bin/tigger-make-brick
index df61912..11885e0 100755
--- a/Tigger/bin/tigger-make-brick
+++ b/Tigger/bin/tigger-make-brick
@@ -105,20 +105,20 @@ while the brick itself will be added (as a FITS image component), and a new sky
# check if we need to overwrite
if output_model and os.path.exists(output_model) and not options.force:
- print "Output file %s already exists. Use the -f switch to overwrite." % output_model
+ print("Output file %s already exists. Use the -f switch to overwrite." % output_model)
sys.exit(1)
# load model, apply selection
model = Tigger.load(skymodel)
- print "Loaded model", skymodel
+ print("Loaded model", skymodel)
# apply selection
sources0 = model.getSourceSubset(options.subset)
# make sure only point sources are left
sources = [src for src in sources0 if src.typecode == "pnt"]
- print "Selection leaves %d source(s), of which %d are point source(s)" % (len(sources0), len(sources))
+ print("Selection leaves %d source(s), of which %d are point source(s)" % (len(sources0), len(sources)))
if not sources:
- print "There's nothing to convert into a brick."
+ print("There's nothing to convert into a brick.")
sys.exit(1)
# get PB expression
@@ -126,37 +126,37 @@ while the brick itself will be added (as a FITS image component), and a new sky
if options.primary_beam:
if options.primary_beam.upper() == "WSRT":
pbfunc = lambda r, fq: cos(min(65 * fq * 1e-9 * r, 1.0881)) ** 6
- print "Primary beam expression is standard WSRT cos^6: 'cos(min(65*fq*1e-9*r,1.0881))**6'"
+ print("Primary beam expression is standard WSRT cos^6: 'cos(min(65*fq*1e-9*r,1.0881))**6'")
elif options.primary_beam.upper() == "NEWSTAR":
pbfunc = lambda r, fq: max(cos(65 * 1e-9 * fq * r) ** 6, .01)
- print "Primary beam expression is standard NEWSTAR cos^6: 'max(cos(65*1e-9*fq*r)**6,.01)'"
+ print("Primary beam expression is standard NEWSTAR cos^6: 'max(cos(65*1e-9*fq*r)**6,.01)'")
else:
try:
pbfunc = eval("lambda r,fq:" + options.primary_beam)
- except Exception, err:
- print "Error parsing primary beam expression %s: %s" % (options.primary_beam, str(err))
+ except Exception as err:
+ print("Error parsing primary beam expression %s: %s" % (options.primary_beam, str(err)))
sys.exit(1)
- print "Primary beam expression is ", options.primary_beam
+ print("Primary beam expression is ", options.primary_beam)
# get frequency
freq = (options.freq or model.refFreq() or 1400) * 1e+6
- print "Brick frequency is %f MHz" % (freq * 1e-6)
+ print("Brick frequency is %f MHz" % (freq * 1e-6))
# read fits file
try:
input_hdu = pyfits.open(fitsfile)[0]
hdr = input_hdu.header
- except Exception, err:
- print "Error reading FITS file %s: %s" % (fitsfile, str(err))
+ except Exception as err:
+ print("Error reading FITS file %s: %s" % (fitsfile, str(err)))
sys.exit(1)
- print "Using FITS file", fitsfile
+ print("Using FITS file", fitsfile)
# reset data if asked to
if not options.add_to_image:
input_hdu.data[...] = 0
- print "Contents of FITS image will be reset"
+ print("Contents of FITS image will be reset")
else:
- print "Adding source(s) to FITS image"
+ print("Adding source(s) to FITS image")
# Parse header to figure out RA and DEC axes
ra_axis = dec_axis = None
for iaxis in range(1, hdr['NAXIS'] + 1):
@@ -168,18 +168,18 @@ while the brick itself will be added (as a FITS image component), and a new sky
dec_axis = iaxis
dec0pix = hdr["CRPIX%d" % iaxis] - 1
if ra_axis is None or dec_axis is None:
- print "Can't find RA and/or DEC axis in this FITS image"
+ print("Can't find RA and/or DEC axis in this FITS image")
sys.exit(1)
# make WCS from header
wcs = WCS(hdr, mode='pyfits')
ra0, dec0 = wcs.pix2wcs(ra0pix, dec0pix)
- print "Image reference pixel (%d,%d) is at %f,%f deg" % (ra0pix, dec0pix, ra0, dec0)
+ print("Image reference pixel (%d,%d) is at %f,%f deg" % (ra0pix, dec0pix, ra0, dec0))
# apply x/y pixel offset
if options.x_offset or options.y_offset:
ra0, dec0 = wcs.pix2wcs(ra0pix + options.x_offset, dec0pix + options.y_offset)
- print "Applying x/y offset moves this to %f,%f deg" % (ra0, dec0)
+ print("Applying x/y offset moves this to %f,%f deg" % (ra0, dec0))
hdr["CRVAL%d" % ra_axis] = ra0
hdr["CRVAL%d" % dec_axis] = dec0
wcs = WCS(hdr, mode='pyfits')
@@ -188,12 +188,12 @@ while the brick itself will be added (as a FITS image component), and a new sky
Imaging.restoreSources(input_hdu, sources, 0, primary_beam=pbfunc, freq=freq)
# save fits file
try:
- input_hdu.writeto(fitsfile, clobber=True)
- except Exception, err:
- print "Error writing FITS file %s: %s" % (fitsfile, str(err))
+ input_hdu.writeto(fitsfile, overwrite=True)
+ except Exception as err:
+ print("Error writing FITS file %s: %s" % (fitsfile, str(err)))
sys.exit(1)
- print "Added %d source(s) into FITS file %s" % (len(sources), fitsfile)
- print "Using pad factor", options.padding
+ print("Added %d source(s) into FITS file %s" % (len(sources), fitsfile))
+ print("Using pad factor", options.padding)
# remove sources from model if asked to
if not options.keep_sources:
@@ -216,7 +216,7 @@ while the brick itself will be added (as a FITS image component), and a new sky
for src in model.sources:
if isinstance(getattr(src, 'shape', None), ModelClasses.FITSImage) and os.path.samefile(src.shape.filename,
fitsfile):
- print "Model already contains a component (%s) for this image. Updating the component" % src.name
+ print("Model already contains a component (%s) for this image. Updating the component" % src.name)
# update source parameters
src.position.ra, src.position.dec = ra0, dec0
src.flux.I = max_flux
@@ -231,9 +231,9 @@ while the brick itself will be added (as a FITS image component), and a new sky
shape = ModelClasses.FITSImage(sx, sy, 0, fitsfile, nx, ny, pad=options.padding)
sname = options.source_name or os.path.splitext(os.path.basename(fitsfile))[0]
img_src = SkyModel.Source(sname, pos, flux, shape=shape)
- print "Inserting new model component named %s" % sname
+ print("Inserting new model component named %s" % sname)
sources.append(img_src)
# save model
model.setSources(sources)
model.save(output_model)
- print "Saved %d source(s) to output model %s." % (len(model.sources), output_model)
+ print("Saved %d source(s) to output model %s." % (len(model.sources), output_model))
diff --git a/Tigger/bin/tigger-restore b/Tigger/bin/tigger-restore
index 1a37d6a..47ec55e 100755
--- a/Tigger/bin/tigger-restore
+++ b/Tigger/bin/tigger-restore
@@ -30,6 +30,7 @@ import os
import sys
from astropy.io import fits as pyfits
+from past.builtins import cmp
if __name__ == '__main__':
@@ -103,10 +104,10 @@ an output image is not specified, makes a name for it automatically.""")
try:
import Tigger
except:
- print "Unable to import the Tigger package. Please check your installation and PYTHONPATH."
+ print("Unable to import the Tigger package. Please check your installation and PYTHONPATH.")
sys.exit(1)
- Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
+ #Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
from Tigger.Tools import Imaging
from Tigger.Tools.Imaging import FWHM, DEG, ARCSEC
@@ -120,15 +121,15 @@ an output image is not specified, makes a name for it automatically.""")
input_type, import_func, dum, input_doc = Tigger.Models.Formats.resolveFormat(skymodel,
options.type if options.type != AUTO else None)
except:
- print "Unable to determine model type for %s, please specify one explicitly with the -t/--type option." % skymodel
+ print("Unable to determine model type for %s, please specify one explicitly with the -t/--type option." % skymodel)
sys.exit(1)
- print "Reading %s (%s)" % (skymodel, input_doc)
+ print("Reading %s (%s)" % (skymodel, input_doc))
model = import_func(skymodel, format=options.format)
Imaging.dprintf(1, "Read %d sources from %s\n", len(model.sources), skymodel)
- sources = sorted(model.sources, lambda a, b: cmp(b.brightness(), a.brightness()))
+ sources = sorted(model.sources, key=lambda a: a.brightness(), reverse=True)  # brightest first, as before
# apply counts and flux scales
if options.nsrc:
@@ -158,12 +159,12 @@ an output image is not specified, makes a name for it automatically.""")
if len(ff) == 1:
gx = gy = float(ff[0])
grot = 0
- print "User-specified restoring beam of %.2f\"" % gx
+ print("User-specified restoring beam of %.2f\"" % gx)
else:
- gx, gy, grot = map(float, ff)
- print "User-specified restoring beam of %.2f\" by %.2f\" at PA %.2f deg" % (gx, gy, grot)
+ gx, gy, grot = list(map(float, ff))
+ print("User-specified restoring beam of %.2f\" by %.2f\" at PA %.2f deg" % (gx, gy, grot))
except:
- print "Invalid -b/--restoring-beam setting."
+ print("Invalid -b/--restoring-beam setting.")
sys.exit(1)
gx /= FWHM * ARCSEC
gy /= FWHM * ARCSEC
@@ -171,16 +172,16 @@ an output image is not specified, makes a name for it automatically.""")
elif options.psf:
# fit the PSF
gx, gy, grot = Imaging.fitPsf(options.psf)
- print "Fitted restoring beam to PSF file %s: %.2f\" by %.2f\" at PA %.2f deg" % (
- options.psf, gx * FWHM * ARCSEC, gy * FWHM * ARCSEC, grot * DEG)
+ print("Fitted restoring beam to PSF file %s: %.2f\" by %.2f\" at PA %.2f deg" % (
+ options.psf, gx * FWHM * ARCSEC, gy * FWHM * ARCSEC, grot * DEG))
else:
# else look in input header
- gx, gy, grot = [input_hdu.header.get(x, None) for x in 'BMAJ', 'BMIN', 'BPA']
- if any([x is None for x in gx, gy, grot]):
- print "Unable to determine restoring beam size, no BMAJ/BMIN/BPA keywords in input image.",
- print "Try using the -b/-p options to specify an explicit restoring beam."
+ gx, gy, grot = [input_hdu.header.get(x, None) for x in ('BMAJ', 'BMIN', 'BPA')]
+ if any([x is None for x in (gx, gy, grot)]):
+ print("Unable to determine restoring beam size, no BMAJ/BMIN/BPA keywords in input image.")
+ print("Try using the -b/-p options to specify an explicit restoring beam.")
sys.exit(1)
- print "Restoring beam (as per input header) is %.2f\" by %.2f\" at PA %.2f deg" % (gx * 3600, gy * 3600, grot)
+ print("Restoring beam (as per input header) is %.2f\" by %.2f\" at PA %.2f deg" % (gx * 3600, gy * 3600, grot))
gx /= DEG * FWHM
gy /= DEG * FWHM
grot /= DEG
@@ -193,22 +194,22 @@ an output image is not specified, makes a name for it automatically.""")
pbexp = eval('lambda r,fq:' + model.primaryBeam())
dum = pbexp(0, 1e+9); # evaluate at r=0 and 1 GHz as a test
if not isinstance(dum, float):
- raise TypeError, "Primary beam expression does not evaluate to a float"
- except Exception, exc:
- print "Bad primary beam expression '%s': %s" % (pb, str(exc))
+ raise TypeError("Primary beam expression does not evaluate to a float")
+ except Exception as exc:
+ print("Bad primary beam expression '%s': %s" % (pb, str(exc)))
sys.exit(1)
if not freq:
- print "Model must contain a reference requency, or else specify one with --freq."
+ print("Model must contain a reference requency, or else specify one with --freq.")
sys.exit(1)
# read, restore, write
- print "Restoring model into input image %s" % input_image
+ print("Restoring model into input image %s" % input_image)
if options.clear:
input_hdu.data[...] = 0
Imaging.restoreSources(input_hdu, sources, gx, gy, grot, primary_beam=pbexp, freq=freq,
apply_beamgain=options.beamgain, ignore_nobeam=options.ignore_nobeam)
- print "Writing output image %s" % output_image
+ print("Writing output image %s" % output_image)
if os.path.exists(output_image):
os.remove(output_image)
input_hdu.writeto(output_image)
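On the sorted() call above: Python 3 removed the cmp= comparator argument, so the descending-brightness sort is expressed with key= plus reverse=True. A standalone sketch with made-up fluxes:

    class Src(object):
        def __init__(self, name, flux):
            self.name, self.flux = name, flux
        def brightness(self):
            return self.flux

    sources = [Src("a", 1.0), Src("b", 3.0), Src("c", 2.0)]
    # Python 2 form: sorted(sources, lambda a, b: cmp(b.brightness(), a.brightness()))
    sources = sorted(sources, key=lambda s: s.brightness(), reverse=True)
    print([s.name for s in sources])   # ['b', 'a', 'c']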
diff --git a/Tigger/bin/tigger-tag b/Tigger/bin/tigger-tag
index 1e6a07f..4133549 100755
--- a/Tigger/bin/tigger-tag
+++ b/Tigger/bin/tigger-tag
@@ -47,7 +47,7 @@ def transfer_tags(fromlsm, lsm, output, tags, tolerance, tigger):
"""
# now, set dE tags on sources
tagset = frozenset(tags.split())
- print("Transferring tags %s from %s to %s (%.2f\" tolerance)" % (",".join(tagset), fromlsm, lsm, tolerance))
+ print(("Transferring tags %s from %s to %s (%.2f\" tolerance)" % (",".join(tagset), fromlsm, lsm, tolerance)))
refmodel = tigger.load(fromlsm)
model = tigger.load(lsm)
@@ -60,8 +60,8 @@ def transfer_tags(fromlsm, lsm, output, tags, tolerance, tigger):
if tagval is not None:
if src.getTag(tag, None) != tagval:
src.setTag(tag, tagval)
- print("setting tag %s=%s on source %s (from reference source %s)" % (
- tag, tagval, src.name, src0.name))
+ print(("setting tag %s=%s on source %s (from reference source %s)" % (
+ tag, tagval, src.name, src0.name)))
model.save(output)
@@ -83,14 +83,14 @@ if __name__ == '__main__':
break
dirname = os.path.dirname(dirname)
else:
- print "Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH." % os.path.realpath(
- __file__)
+ print("Unable to locate the Tigger directory, it is not a parent of %s. Please check your installation and/or PYTHONPATH." % os.path.realpath(
+ __file__))
sys.exit(1)
sys.path.append(os.path.dirname(dirname))
try:
import Tigger
except:
- print "Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH." % dirname
+ print("Unable to import the Tigger package from %s. Please check your installation and PYTHONPATH." % dirname)
sys.exit(1)
Tigger.nuke_matplotlib(); # don't let the door hit you in the ass, sucka
@@ -142,9 +142,9 @@ Saves the result to an LSM file given by -o/--output.
# load the model
model = Tigger.load(skymodel)
if not model.sources:
- print "Input model %s contains no sources" % skymodel
+ print("Input model %s contains no sources" % skymodel)
sys.exit(0)
- print "Input model contains %d sources" % len(model.sources)
+ print("Input model contains %d sources" % len(model.sources))
if options.transfer_tags:
fromlsm, tolerance = options.transfer_tags.split(":")
@@ -192,20 +192,20 @@ Saves the result to an LSM file given by -o/--output.
# if selection is not None, then we've already selected and tagged something, so we need
# to reset the selection to empty and start again. If selected_ids is None, this is the first selection
if selection is not None or selected_ids is None:
- print "Selecting sources:"
+ print("Selecting sources:")
selected_ids = set()
selection = None
# add to current selection
- selected_ids.update(map(id, sel))
+ selected_ids.update(list(map(id, sel)))
# print result
if not len(sel):
- print ' %-16s: no sources selected' % selstr
+ print(' %-16s: no sources selected' % selstr)
elif len(sel) == 1:
- print ' %-16s: one source selected (%s)' % (selstr, sel[0].name)
+ print(' %-16s: one source selected (%s)' % (selstr, sel[0].name))
elif len(sel) <= 5:
- print ' %-16s: %d sources selected (%s)' % (selstr, len(sel), " ".join([src.name for src in sel]))
+ print(' %-16s: %d sources selected (%s)' % (selstr, len(sel), " ".join([src.name for src in sel])))
else:
- print ' %-16s: %d sources selected' % (selstr, len(sel))
+ print(' %-16s: %d sources selected' % (selstr, len(sel)))
def retrieve_selection():
@@ -217,13 +217,13 @@ Saves the result to an LSM file given by -o/--output.
# no explicit selection: use entire model
if selected_ids is None:
selection = model.sources
- print "No explicit selection, using all sources."
+ print("No explicit selection, using all sources.")
# else use selected set
else:
selection = [src for src in model.sources if id(src) in selected_ids]
- print "Using %d selected sources:" % len(selection)
+ print("Using %d selected sources:" % len(selection))
if options.list:
- print "Sources: %s" % (" ".join([x.name for x in selection]))
+ print("Sources: %s" % (" ".join([x.name for x in selection])))
global listed
listed = True
return selection
@@ -246,7 +246,7 @@ Saves the result to an LSM file given by -o/--output.
for subobj in tags[:-1]:
src = getattr(src, subobj, None)
if src is None:
- print "Can't resolve attribute %s for source %s" % (tagname, src.name)
+ print("Can't resolve attribute %s for source %s" % (tagname, src.name))
sys.exit(1)
return src, tags[-1]
@@ -256,7 +256,7 @@ Saves the result to an LSM file given by -o/--output.
# Match either the SELTAG<>SELVAL, or the TAG=[TYPE:]VALUE, or the [+!/]TAG forms
# If none match, assume the NAME form
mselcomp = re.match("^(?i)([^=<>!.]+)(%s)([^dms]+)([dms])?" % "|".join(
- [key.replace('.', '\.') for key in select_predicates.keys()]), arg)
+ [key.replace('.', '\.') for key in list(select_predicates.keys())]), arg)
mseltag = re.match("=(.+)$", arg)
mset = re.match("^(.+)=((bool|int|str|float|complex):)?(.+)$", arg)
msetbool = re.match("^([+!/])(.+)$", arg)
@@ -280,7 +280,7 @@ Saves the result to an LSM file given by -o/--output.
elif mset:
sources = retrieve_selection()
if options.list:
- print "--list in effect, ignoring tagging commands"
+ print("--list in effect, ignoring tagging commands")
continue
tagname, typespec, typename, value = mset.groups()
# if type is specified, use it to explicitly convert the value
@@ -295,14 +295,14 @@ Saves the result to an LSM file given by -o/--output.
try:
newval = bool(int(value))
except:
- print "Can't parse \"%s\" as a value of type bool" % value
+ print("Can't parse \"%s\" as a value of type bool" % value)
sys.exit(2)
# else some other type is specified -- use it to convert the value
elif typename:
try:
newval = getattr(__builtin__, typename)(value)
except:
- print "Can't parse \"%s\" as a value of type %s" % (value, typename)
+ print("Can't parse \"%s\" as a value of type %s" % (value, typename))
sys.exit(2)
# else auto-convert
else:
@@ -317,30 +317,30 @@ Saves the result to an LSM file given by -o/--output.
if type(newval) is str:
value = '"%s"' % value
if sources:
- print " setting tag %s=%s (type '%s')" % (tagname, value, type(newval).__name__)
+ print(" setting tag %s=%s (type '%s')" % (tagname, value, type(newval).__name__))
for src in sources:
obj, tag = lookupObject(src, tagname)
obj.setAttribute(tag, newval)
modified = True
else:
- print "No sources selected, ignoring tagging commands"
+ print("No sources selected, ignoring tagging commands")
elif msetbool:
sources = retrieve_selection()
if options.list:
- print "--list in effect, ignoring tagging commands"
+ print("--list in effect, ignoring tagging commands")
continue
if sources:
op, tagname = msetbool.groups()
if op == "+":
- print " setting tag %s=True" % tagname
+ print(" setting tag %s=True" % tagname)
method = 'setAttribute'
args = (tagname, True)
elif op == "!":
- print " setting tag %s=False" % tagname
+ print(" setting tag %s=False" % tagname)
method = 'setAttribute'
args = (tagname, False)
elif op == "/":
- print " removing tag %s" % tagname
+ print(" removing tag %s" % tagname)
method = 'removeAttribute'
args = (tagname,)
for src in sources:
@@ -348,28 +348,28 @@ Saves the result to an LSM file given by -o/--output.
getattr(obj, method)(*args)
modified = True
else:
- print "No sources selected, ignoring tagging commands"
+ print("No sources selected, ignoring tagging commands")
if options.list:
if not listed:
retrieve_selection()
if not modified:
- print "Model was not modified"
+ print("Model was not modified")
sys.exit(0)
# prompt
if not options.force:
try:
- raw_input("Press ENTER to save model or Ctrl+C to cancel: ")
+ input("Press ENTER to save model or Ctrl+C to cancel: ")
except:
- print "Cancelling"
+ print("Cancelling")
sys.exit(1)
# save output
if options.output:
model.save(options.output)
- print "Saved updated model to %s" % options, output
+ print("Saved updated model to %s" % options, output)
else:
model.save(skymodel)
- print "Saved updated model"
+ print("Saved updated model")
diff --git a/setup.py b/setup.py
index 10a0280..101f340 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
setup(
- name ="astro-tigger-lsm",
+ name="astro-tigger-lsm",
version=__version__,
packages=find_packages(),
extras_require=extras_require,