Determine the number of input files provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
def countinputs(inputlist):
"""
    Determine the number of input files provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
"""
# Initialize return values
numInputs = 0
numASNfiles = 0
    # Use irafglob to count the number of input files
files = irafglob(inputlist, atfile=None)
    # Use the built-in "len" function to count the number of entries in the list
numInputs = len(files)
# Loop over the list and see if any of the entries are association files
for file in files:
        if checkASN(file):
            numASNfiles += 1
    return numInputs, numASNfiles
|
show a summary of all projects
def summary(logfile, time_format):
"show a summary of all projects"
def output(summary):
width = max([len(p[0]) for p in summary]) + 3
print '\n'.join([
"%s%s%s" % (p[0], ' ' * (width - len(p[0])),
colored(minutes_to_txt(p[1]), 'red')) for p in summary])
output(server.summarize(read(logfile, time_format, only_elapsed=True)))
|
show current status
def status(logfile, time_format):
"show current status"
try:
r = read(logfile, time_format)[-1]
if r[1][1]:
return summary(logfile, time_format)
else:
print "working on %s" % colored(r[0], attrs=['bold'])
print " since %s" % colored(
server.date_to_txt(r[1][0], time_format), 'green')
print " to now, %s" % colored(
server.date_to_txt(now(), time_format), 'green')
print " => %s elapsed" % colored(time_elapsed(r[1][0]), 'red')
except IndexError:
return cmdapp.help()
|
start tracking for <project>
def start(project, logfile, time_format):
"start tracking for <project>"
records = read(logfile, time_format)
if records and not records[-1][1][1]:
print "error: there is a project already active"
return
write(server.start(project, records), logfile, time_format)
print "starting work on %s" % colored(project, attrs=['bold'])
print " at %s" % colored(server.date_to_txt(now(), time_format), 'green')
|
stop tracking for the active project
def stop(logfile, time_format):
"stop tracking for the active project"
def save_and_output(records):
records = server.stop(records)
write(records, logfile, time_format)
def output(r):
print "worked on %s" % colored(r[0], attrs=['bold'])
print " from %s" % colored(
server.date_to_txt(r[1][0], time_format), 'green')
print " to now, %s" % colored(
server.date_to_txt(r[1][1], time_format), 'green')
print " => %s elapsed" % colored(
time_elapsed(r[1][0], r[1][1]), 'red')
output(records[-1])
save_and_output(read(logfile, time_format))
|
parses a stream with text formatted as a Timed logfile and shows a summary
def parse(logfile, time_format):
"parses a stream with text formatted as a Timed logfile and shows a summary"
records = [server.record_from_txt(line, only_elapsed=True,
time_format=time_format) for line in sys.stdin.readlines()]
# TODO: make this code better.
def output(summary):
width = max([len(p[0]) for p in summary]) + 3
print '\n'.join([
"%s%s%s" % (p[0], ' ' * (width - len(p[0])),
colored(minutes_to_txt(p[1]), 'red')) for p in summary])
output(server.summarize(records))
|
prints a newline-separated list of all projects
def projects(logfile, time_format):
"prints a newline-separated list of all projects"
print '\n'.join(server.list_projects(read(logfile, time_format)))
|
Returns a formatted string with the current local time.
def getLTime():
"""Returns a formatted string with the current local time."""
_ltime = _time.localtime(_time.time())
tlm_str = _time.strftime('%H:%M:%S (%d/%m/%Y)', _ltime)
return tlm_str
|
Returns a formatted string with the current date.
def getDate():
"""Returns a formatted string with the current date."""
_ltime = _time.localtime(_time.time())
date_str = _time.strftime('%Y-%m-%dT%H:%M:%S',_ltime)
return date_str
|
Convert DATE string into a decimal year.
def convertDate(date):
"""Convert DATE string into a decimal year."""
d, t = date.split('T')
return decimal_date(d, timeobs=t)
|
Convert DATE-OBS (and optional TIME-OBS) into a decimal year.
def decimal_date(dateobs, timeobs=None):
"""Convert DATE-OBS (and optional TIME-OBS) into a decimal year."""
year, month, day = dateobs.split('-')
if timeobs is not None:
hr, min, sec = timeobs.split(':')
else:
hr, min, sec = 0, 0, 0
rdate = datetime.datetime(int(year), int(month), int(day), int(hr),
int(min), int(sec))
dday = (float(rdate.strftime("%j")) + rdate.hour / 24.0 +
rdate.minute / (60. * 24) + rdate.second / (3600 * 24.)) / 365.25
ddate = int(year) + dday
return ddate
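A quick usage sketch (not part of the original module), assuming the decimal_date
function above is in scope; the date below is arbitrary and the results are approximate:
print(decimal_date('2004-07-02', '12:00:00'))   # ~2004.505 (day 184.5 of a leap year / 365.25)
print(decimal_date('2004-07-02'))               # TIME-OBS defaults to 00:00:00 -> ~2004.504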
|
Converts an integer 'input' into its component bit values as a list of
power of 2 integers.
For example, the bit value 1027 would return [1, 2, 1024]
def interpretDQvalue(input):
"""
Converts an integer 'input' into its component bit values as a list of
power of 2 integers.
For example, the bit value 1027 would return [1, 2, 1024]
"""
nbits = 16
# We will only support integer values up to 2**128
for iexp in [16, 32, 64, 128]:
# Find out whether the input value is less than 2**iexp
if (input // (2 ** iexp)) == 0:
# when it finally is, we have identified how many bits can be used to
# describe this input bitvalue
nbits = iexp
break
# Find out how 'dtype' values are described on this machine
a = np.zeros(1, dtype='int16')
atype_descr = a.dtype.descr[0][1]
# Use this description to build the description we need for our input integer
dtype_str = atype_descr[:2] + str(nbits // 8)
result = np.zeros(nbits + 1, dtype=dtype_str)
# For each bit, determine whether it has been set in the input value or not
for n in range(nbits + 1):
i = 2 ** n
if input & i > 0:
# record which bit has been set as the power-of-2 integer
result[n] = i
# Return the non-zero unique values as a Python list
return np.delete(np.unique(result), 0).tolist()
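A minimal check of the docstring example, assuming interpretDQvalue and numpy (np) are in scope:
print(interpretDQvalue(1027))    # [1, 2, 1024]
print(interpretDQvalue(4096))    # [4096]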
|
Returns
--------
isFits: tuple
An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and
``fitstype`` are specified as:
- ``isfits``: True|False
- ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
Notes
-----
Input images which do not have a valid FITS filename will automatically
result in a return of (False, None).
In the case that the input has a valid FITS filename but runs into some
error upon opening, this routine will raise that exception for the calling
routine/user to handle.
def isFits(input):
"""
Returns
--------
isFits: tuple
An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and
``fitstype`` are specified as:
- ``isfits``: True|False
- ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
Notes
-----
Input images which do not have a valid FITS filename will automatically
result in a return of (False, None).
In the case that the input has a valid FITS filename but runs into some
error upon opening, this routine will raise that exception for the calling
routine/user to handle.
"""
isfits = False
fitstype = None
names = ['fits', 'fit', 'FITS', 'FIT']
#determine if input is a fits file based on extension
# Only check type of FITS file if filename ends in valid FITS string
f = None
fileclose = False
if isinstance(input, fits.HDUList):
isfits = True
f = input
else:
isfits = True in [input.endswith(l) for l in names]
# if input is a fits file determine what kind of fits it is
#waiver fits len(shape) == 3
if isfits:
if f is None:
try:
f = fits.open(input, mode='readonly')
fileclose = True
except Exception:
if f is not None:
f.close()
raise
data0 = f[0].data
if data0 is not None:
try:
if isinstance(f[1], fits.TableHDU):
fitstype = 'waiver'
except IndexError:
fitstype = 'simple'
else:
fitstype = 'mef'
if fileclose:
f.close()
return isfits, fitstype
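An illustrative sketch (not from the original module): an in-memory HDUList can be
classified without touching the filesystem, assuming astropy and numpy are importable.
from astropy.io import fits
import numpy as np
simple = fits.HDUList([fits.PrimaryHDU(data=np.zeros((4, 4)))])
print(isFits(simple))        # (True, 'simple') -- data in the primary HDU, no extensions
print(isFits('image.txt'))   # (False, None)    -- name does not end in a FITS suffix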
|
Checks whether files are writable. It is up to the calling routine to raise
an Exception, if desired.
This function returns True if all files are writable and False if any are
not writable. In addition, for all files found not to be writable, it will
print out the list of names of the affected files.
def verifyWriteMode(files):
"""
Checks whether files are writable. It is up to the calling routine to raise
an Exception, if desired.
    This function returns True if all files are writable and False if any are
    not writable. In addition, for all files found not to be writable, it will
    print out the list of names of the affected files.
"""
# Start by insuring that input is a list of filenames,
# if only a single filename has been given as input,
# convert it to a list with len == 1.
if not isinstance(files, list):
files = [files]
# Keep track of the name of each file which is not writable
not_writable = []
writable = True
# Check each file in input list
for fname in files:
try:
f = open(fname,'a')
f.close()
del f
except:
not_writable.append(fname)
writable = False
if not writable:
print('The following file(s) do not have write permission!')
for fname in not_writable:
print(' ', fname)
return writable
|
Returns a comma-separated string of filter names extracted from the input
header (PyFITS header object). This function has been hard-coded to
support the following instruments:
ACS, WFPC2, STIS, NICMOS, WFC3
This function relies on the 'INSTRUME' keyword to define what instrument
has been used to generate the observation/header.
The 'filternames' parameter allows the user to provide a list of keyword
names for their instrument, in case their instrument is not supported.
def getFilterNames(header, filternames=None):
"""
Returns a comma-separated string of filter names extracted from the input
header (PyFITS header object). This function has been hard-coded to
support the following instruments:
    ACS, WFPC2, STIS, NICMOS, WFC3
This function relies on the 'INSTRUME' keyword to define what instrument
has been used to generate the observation/header.
The 'filternames' parameter allows the user to provide a list of keyword
    names for their instrument, in case their instrument is not supported.
"""
# Define the keyword names for each instrument
_keydict = {
'ACS': ['FILTER1', 'FILTER2'],
'WFPC2': ['FILTNAM1', 'FILTNAM2'],
'STIS': ['OPT_ELEM', 'FILTER'],
'NICMOS': ['FILTER', 'FILTER2'],
'WFC3': ['FILTER', 'FILTER2']
}
# Find out what instrument the input header came from, based on the
# 'INSTRUME' keyword
if 'INSTRUME' in header:
instrument = header['INSTRUME']
else:
raise ValueError('Header does not contain INSTRUME keyword.')
# Check to make sure this instrument is supported in _keydict
if instrument in _keydict:
_filtlist = _keydict[instrument]
else:
_filtlist = filternames
# At this point, we know what keywords correspond to the filter names
# in the header. Now, get the values associated with those keywords.
# Build a list of all filter name values, with the exception of the
# blank keywords. Values containing 'CLEAR' or 'N/A' are valid.
_filter_values = []
for _key in _filtlist:
if _key in header:
_val = header[_key]
else:
_val = ''
if _val.strip() != '':
_filter_values.append(header[_key])
# Return the comma-separated list
return ','.join(_filter_values)
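A hedged example using a hand-built astropy Header; the keyword values are made up:
from astropy.io import fits
hdr = fits.Header()
hdr['INSTRUME'] = 'ACS'
hdr['FILTER1'] = 'F606W'
hdr['FILTER2'] = 'CLEAR2L'
print(getFilterNames(hdr))   # 'F606W,CLEAR2L'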
|
Build rootname for a new file.
If 'extn' is given, it is used as the suffix for the new filename; otherwise
no suffix/extension is appended at all.
Does NOT check to see if it exists already. Will ALWAYS return a new
filename.
def buildNewRootname(filename, extn=None, extlist=None):
"""
Build rootname for a new file.
    If 'extn' is given, it is used as the suffix for the new filename; otherwise
    no suffix/extension is appended at all.
Does NOT check to see if it exists already. Will ALWAYS return a new
filename.
"""
# Search known suffixes to replace ('_crj.fits',...)
_extlist = copy.deepcopy(EXTLIST)
# Also, add a default where '_dth.fits' replaces
# whatever extension was there ('.fits','.c1h',...)
#_extlist.append('.')
# Also append any user-specified extensions...
if extlist:
_extlist += extlist
if isinstance(filename, fits.HDUList):
try:
filename = filename.filename()
except:
            raise ValueError("Can't determine the filename of a waivered HDUList object.")
for suffix in _extlist:
_indx = filename.find(suffix)
if _indx > 0: break
if _indx < 0:
# default to entire rootname
_indx = len(filename)
if extn is None:
extn = ''
return filename[:_indx] + extn
|
Build a new rootname for an existing file and given extension.
Any user supplied extensions to use for searching for file need to be
provided as a list of extensions.
Examples
--------
::
>>> rootname = buildRootname(filename, ext=['_dth.fits']) # doctest: +SKIP
def buildRootname(filename, ext=None):
"""
Build a new rootname for an existing file and given extension.
Any user supplied extensions to use for searching for file need to be
provided as a list of extensions.
Examples
--------
::
>>> rootname = buildRootname(filename, ext=['_dth.fits']) # doctest: +SKIP
"""
if filename in ['' ,' ', None]:
return None
fpath, fname = os.path.split(filename)
if ext is not None and '_' in ext[0]:
froot = os.path.splitext(fname)[0].split('_')[0]
else:
froot = fname
if fpath in ['', ' ', None]:
fpath = os.curdir
# Get complete list of filenames from current directory
flist = os.listdir(fpath)
#First, assume given filename is complete and verify
# it exists...
rootname = None
for name in flist:
if name == froot:
rootname = froot
break
elif name == froot + '.fits':
rootname = froot + '.fits'
break
# If we have an incomplete filename, try building a default
# name and seeing if it exists...
#
# Set up default list of suffix/extensions to add to rootname
_extlist = []
for extn in EXTLIST:
_extlist.append(extn)
if rootname is None:
# Add any user-specified extension to list of extensions...
if ext is not None:
for i in ext:
_extlist.insert(0,i)
# loop over all extensions looking for a filename that matches...
for extn in _extlist:
# Start by looking for filename with exactly
# the same case a provided in ASN table...
rname = froot + extn
for name in flist:
if rname == name:
rootname = name
break
if rootname is None:
# Try looking for all lower-case filename
# instead of a mixed-case filename as required
# by the pipeline.
rname = froot.lower() + extn
for name in flist:
if rname == name:
rootname = name
break
if rootname is not None:
break
# If we still haven't found the file, see if we have the
# info to build one...
if rootname is None and ext is not None:
# Check to see if we have a full filename to start with...
_indx = froot.find('.')
if _indx > 0:
rootname = froot[:_indx] + ext[0]
else:
rootname = froot + ext[0]
if fpath not in ['.', '', ' ', None]:
rootname = os.path.join(fpath, rootname)
# It will be up to the calling routine to verify
# that a valid rootname, rather than 'None', was returned.
return rootname
|
General, write-safe method for returning a keyword value from the header of
an IRAF-recognized image.
Returns the value as a string.
def getKeyword(filename, keyword, default=None, handle=None):
"""
General, write-safe method for returning a keyword value from the header of
    an IRAF-recognized image.
Returns the value as a string.
"""
# Insure that there is at least 1 extension specified...
if filename.find('[') < 0:
filename += '[0]'
_fname, _extn = parseFilename(filename)
if not handle:
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname)
else:
# Use what the user provides, after insuring
# that it is a proper PyFITS object.
if isinstance(handle, fits.HDUList):
_fimg = handle
else:
raise ValueError('Handle must be %r object!' % fits.HDUList)
# Address the correct header
_hdr = getExtn(_fimg, _extn).header
try:
value = _hdr[keyword]
except KeyError:
_nextn = findKeywordExtn(_fimg, keyword)
try:
value = _fimg[_nextn].header[keyword]
except KeyError:
value = ''
if not handle:
_fimg.close()
del _fimg
if value == '':
if default is None:
value = None
else:
value = default
# NOTE: Need to clean up the keyword.. Occasionally the keyword value
# goes right up to the "/" FITS delimiter, and iraf.keypar is incapable
# of realizing this, so it incorporates "/" along with the keyword value.
# For example, after running "pydrizzle" on the image "j8e601bkq_flt.fits",
# the CD keywords look like this:
#
# CD1_1 = 9.221627430999639E-06/ partial of first axis coordinate w.r.t. x
# CD1_2 = -1.0346992614799E-05 / partial of first axis coordinate w.r.t. y
#
# so for CD1_1, iraf.keypar returns:
# "9.221627430999639E-06/"
#
# So, the following piece of code CHECKS for this and FIXES the string,
# very simply by removing the last character if it is a "/".
# This fix courtesy of Anton Koekemoer, 2002.
elif isinstance(value, string_types):
if value[-1:] == '/':
value = value[:-1]
return value
|
Return a copy of the PRIMARY header, along with any group/extension header
for this filename specification.
def getHeader(filename, handle=None):
"""
Return a copy of the PRIMARY header, along with any group/extension header
for this filename specification.
"""
_fname, _extn = parseFilename(filename)
# Allow the user to provide an already opened PyFITS object
# to derive the header from...
#
if not handle:
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname, mode='readonly')
else:
# Use what the user provides, after insuring
# that it is a proper PyFITS object.
if isinstance(handle, fits.HDUList):
_fimg = handle
else:
raise ValueError('Handle must be a %r object!' % fits.HDUList)
_hdr = _fimg['PRIMARY'].header.copy()
# if the data is not in the primary array delete NAXIS
# so that the correct value is read from the extension header
if _hdr['NAXIS'] == 0:
del _hdr['NAXIS']
if not (_extn is None or (_extn.isdigit() and int(_extn) == 0)):
# Append correct extension/chip/group header to PRIMARY...
#for _card in getExtn(_fimg,_extn).header.ascard:
#_hdr.ascard.append(_card)
for _card in getExtn(_fimg, _extn).header.cards:
_hdr.append(_card)
if not handle:
# Close file handle now...
_fimg.close()
del _fimg
return _hdr
|
Add/update keyword to header with given value.
def updateKeyword(filename, key, value, show=yes):
"""Add/update keyword to header with given value."""
_fname, _extn = parseFilename(filename)
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname, mode='update')
# Address the correct header
_hdr = getExtn(_fimg, _extn).header
# Assign a new value or add new keyword here.
try:
_hdr[key] = value
except KeyError:
if show:
print('Adding new keyword ', key, '=', value)
_hdr[key] = value
# Close image
_fimg.close()
del _fimg
|
Build a new FITS filename for a GEIS input image.
def buildFITSName(geisname):
"""Build a new FITS filename for a GEIS input image."""
# User wants to make a FITS copy and update it...
_indx = geisname.rfind('.')
_fitsname = geisname[:_indx] + '_' + geisname[_indx + 1:-1] + 'h.fits'
return _fitsname
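For example, with a hypothetical GEIS header filename:
print(buildFITSName('test1.c0h'))   # 'test1_c0h.fits'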
|
Opens file and returns PyFITS object. Works on both FITS and GEIS
formatted images.
Notes
-----
If a GEIS or waivered FITS image is used as input, it will convert it to a
MEF object and only if ``writefits = True`` will write it out to a file. If
``fitsname = None``, the name used to write out the new MEF file will be
created using `buildFITSName`.
Parameters
----------
filename: str
name of input file
mode: str
mode for opening file based on PyFITS `mode` parameter values
memmap: bool
switch for using memory mapping, `False` for no, `True` for yes
writefits: bool
if `True`, will write out GEIS as multi-extension FITS
and return handle to that opened GEIS-derived MEF file
clobber: bool
overwrite previously written out GEIS-derived MEF file
fitsname: str
name to use for GEIS-derived MEF file,
if None and writefits==`True`, will use 'buildFITSName()' to generate one
def openImage(filename, mode='readonly', memmap=False, writefits=True,
clobber=True, fitsname=None):
"""
Opens file and returns PyFITS object. Works on both FITS and GEIS
formatted images.
Notes
-----
If a GEIS or waivered FITS image is used as input, it will convert it to a
MEF object and only if ``writefits = True`` will write it out to a file. If
``fitsname = None``, the name used to write out the new MEF file will be
created using `buildFITSName`.
Parameters
----------
filename: str
name of input file
mode: str
mode for opening file based on PyFITS `mode` parameter values
memmap: bool
switch for using memory mapping, `False` for no, `True` for yes
writefits: bool
if `True`, will write out GEIS as multi-extension FITS
and return handle to that opened GEIS-derived MEF file
clobber: bool
overwrite previously written out GEIS-derived MEF file
fitsname: str
name to use for GEIS-derived MEF file,
if None and writefits==`True`, will use 'buildFITSName()' to generate one
"""
if not isinstance(filename, fits.HDUList):
# Insure that the filename is always fully expanded
# This will not affect filenames without paths or
# filenames specified with extensions.
filename = osfn(filename)
# Extract the rootname and extension specification
# from input image name
_fname, _iextn = parseFilename(filename)
else:
_fname = filename
# Check whether we have a FITS file and if so what type
isfits, fitstype = isFits(_fname)
if isfits:
if fitstype != 'waiver':
# Open the FITS file
fimg = fits.open(_fname, mode=mode, memmap=memmap)
return fimg
else:
fimg = convertwaiveredfits.convertwaiveredfits(_fname)
#check for the existence of a data quality file
_dqname = buildNewRootname(_fname, extn='_c1f.fits')
dqexists = os.path.exists(_dqname)
if dqexists:
try:
dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
dqfitsname = buildNewRootname(_dqname, extn='_c1h.fits')
except:
print("Could not read data quality file %s" % _dqname)
if writefits:
# User wants to make a FITS copy and update it
# using the filename they have provided
if fitsname is None:
rname = buildNewRootname(_fname)
fitsname = buildNewRootname(rname, extn='_c0h.fits')
# Write out GEIS image as multi-extension FITS.
fexists = os.path.exists(fitsname)
if (fexists and clobber) or not fexists:
print('Writing out WAIVERED as MEF to ', fitsname)
if ASTROPY_VER_GE13:
fimg.writeto(fitsname, overwrite=clobber)
else:
fimg.writeto(fitsname, clobber=clobber)
if dqexists:
print('Writing out WAIVERED as MEF to ', dqfitsname)
if ASTROPY_VER_GE13:
dqfile.writeto(dqfitsname, overwrite=clobber)
else:
dqfile.writeto(dqfitsname, clobber=clobber)
# Now close input GEIS image, and open writable
# handle to output FITS image instead...
fimg.close()
del fimg
# Image re-written as MEF, now it needs its WCS updated
#updatewcs.updatewcs(fitsname)
fimg = fits.open(fitsname, mode=mode, memmap=memmap)
# Return handle for use by user
return fimg
else:
# Input was specified as a GEIS image, but no FITS copy
# exists. Read it in with 'readgeis' and make a copy
# then open the FITS copy...
try:
# Open as a GEIS image for reading only
fimg = readgeis.readgeis(_fname)
except:
raise IOError("Could not open GEIS input: %s" % _fname)
#check for the existence of a data quality file
_dqname = buildNewRootname(_fname, extn='.c1h')
dqexists = os.path.exists(_dqname)
if dqexists:
try:
dqfile = readgeis.readgeis(_dqname)
dqfitsname = buildFITSName(_dqname)
except:
print("Could not read data quality file %s" % _dqname)
# Check to see if user wanted to update GEIS header.
# or write out a multi-extension FITS file and return a handle to it
if writefits:
# User wants to make a FITS copy and update it
# using the filename they have provided
if fitsname is None:
fitsname = buildFITSName(_fname)
# Write out GEIS image as multi-extension FITS.
fexists = os.path.exists(fitsname)
if (fexists and clobber) or not fexists:
print('Writing out GEIS as MEF to ', fitsname)
if ASTROPY_VER_GE13:
fimg.writeto(fitsname, overwrite=clobber)
else:
fimg.writeto(fitsname, clobber=clobber)
if dqexists:
print('Writing out GEIS as MEF to ', dqfitsname)
if ASTROPY_VER_GE13:
dqfile.writeto(dqfitsname, overwrite=clobber)
else:
dqfile.writeto(dqfitsname, clobber=clobber)
# Now close input GEIS image, and open writable
# handle to output FITS image instead...
fimg.close()
del fimg
# Image re-written as MEF, now it needs its WCS updated
#updatewcs.updatewcs(fitsname)
fimg = fits.open(fitsname, mode=mode, memmap=memmap)
# Return handle for use by user
return fimg
|
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
def parseFilename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
"""
# Parse out any extension specified in filename
_indx = filename.find('[')
if _indx > 0:
# Read extension name provided
_fname = filename[:_indx]
_extn = filename[_indx + 1:-1]
else:
_fname = filename
_extn = None
return _fname, _extn
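For example (the filenames are illustrative only):
print(parseFilename('j8e601bkq_flt.fits[sci,1]'))   # ('j8e601bkq_flt.fits', 'sci,1')
print(parseFilename('j8e601bkq_flt.fits'))          # ('j8e601bkq_flt.fits', None)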
|
Parse a string representing a qualified fits extension name as in the
output of `parseFilename` and return a tuple ``(str(extname),
int(extver))``, which can be passed to `astropy.io.fits` functions using
the 'ext' kw.
Default return is the first extension in a fits file.
Examples
--------
::
>>> parseExtn('sci, 2')
('sci', 2)
>>> parseExtn('2')
('', 2)
>>> parseExtn('sci')
('sci', 1)
def parseExtn(extn=None):
"""
Parse a string representing a qualified fits extension name as in the
output of `parseFilename` and return a tuple ``(str(extname),
int(extver))``, which can be passed to `astropy.io.fits` functions using
the 'ext' kw.
Default return is the first extension in a fits file.
Examples
--------
::
>>> parseExtn('sci, 2')
('sci', 2)
>>> parseExtn('2')
('', 2)
>>> parseExtn('sci')
('sci', 1)
"""
if not extn:
return ('', 0)
try:
lext = extn.split(',')
except:
return ('', 1)
if len(lext) == 1 and lext[0].isdigit():
return ("", int(lext[0]))
elif len(lext) == 2:
return (lext[0], int(lext[1]))
else:
return (lext[0], 1)
|
Return the number of 'extname' extensions, defaulting to counting the
number of SCI extensions.
def countExtn(fimg, extname='SCI'):
"""
Return the number of 'extname' extensions, defaulting to counting the
number of SCI extensions.
"""
closefits = False
if isinstance(fimg, string_types):
fimg = fits.open(fimg)
closefits = True
n = 0
for e in fimg:
if 'extname' in e.header and e.header['extname'] == extname:
n += 1
if closefits:
fimg.close()
return n
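A small sketch with an in-memory MEF-style HDUList, assuming astropy and numpy are importable:
from astropy.io import fits
import numpy as np
hdul = fits.HDUList([fits.PrimaryHDU()])
for ver in (1, 2):
    hdu = fits.ImageHDU(data=np.zeros((2, 2)), name='SCI')
    hdu.header['EXTVER'] = ver
    hdul.append(hdu)
print(countExtn(hdul))          # 2
print(countExtn(hdul, 'DQ'))    # 0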
|
Returns the PyFITS extension corresponding to extension specified in
filename.
Defaults to returning the first extension with data or the primary
extension, if none have data. If a non-existent extension has been
specified, it raises a `KeyError` exception.
def getExtn(fimg, extn=None):
"""
Returns the PyFITS extension corresponding to extension specified in
filename.
Defaults to returning the first extension with data or the primary
extension, if none have data. If a non-existent extension has been
specified, it raises a `KeyError` exception.
"""
# If no extension is provided, search for first extension
# in FITS file with data associated with it.
if extn is None:
# Set up default to point to PRIMARY extension.
_extn = fimg[0]
# then look for first extension with data.
for _e in fimg:
if _e.data is not None:
_extn = _e
break
else:
# An extension was provided, so parse it out...
if repr(extn).find(',') > 1:
if isinstance(extn, tuple):
# We have a tuple possibly created by parseExtn(), so
# turn it into a list for easier manipulation.
_extns = list(extn)
if '' in _extns:
_extns.remove('')
else:
_extns = extn.split(',')
# Two values given for extension:
# for example, 'sci,1' or 'dq,1'
try:
_extn = fimg[_extns[0], int(_extns[1])]
except KeyError:
_extn = None
for e in fimg:
hdr = e.header
if ('extname' in hdr and
hdr['extname'].lower() == _extns[0].lower() and
hdr['extver'] == int(_extns[1])):
_extn = e
break
elif repr(extn).find('/') > 1:
# We are working with GEIS group syntax
_indx = str(extn[:extn.find('/')])
_extn = fimg[int(_indx)]
elif isinstance(extn, string_types):
if extn.strip() == '':
_extn = None # force error since invalid name was provided
# Only one extension value specified...
elif extn.isdigit():
# We only have an extension number specified as a string...
_nextn = int(extn)
else:
# We only have EXTNAME specified...
_nextn = None
if extn.lower() == 'primary':
_nextn = 0
else:
i = 0
for hdu in fimg:
isimg = 'extname' in hdu.header
hdr = hdu.header
if isimg and extn.lower() == hdr['extname'].lower():
_nextn = i
break
i += 1
if _nextn < len(fimg):
_extn = fimg[_nextn]
else:
_extn = None
else:
# Only integer extension number given, or default of 0 is used.
if int(extn) < len(fimg):
_extn = fimg[int(extn)]
else:
_extn = None
if _extn is None:
raise KeyError('Extension %s not found' % extn)
return _extn
|
Search a directory for full filename with optional path.
def findFile(input):
"""Search a directory for full filename with optional path."""
# If no input name is provided, default to returning 'no'(FALSE)
if not input:
return no
# We use 'osfn' here to insure that any IRAF variables are
# expanded out before splitting out the path...
_fdir, _fname = os.path.split(osfn(input))
if _fdir == '':
_fdir = os.curdir
try:
flist = os.listdir(_fdir)
except OSError:
        # handle when the requested file is on a disconnected network store
return no
_root, _extn = parseFilename(_fname)
found = no
for name in flist:
if name == _root:
# Check to see if given extension, if any, exists
if _extn is None:
found = yes
continue
else:
_split = _extn.split(',')
_extnum = None
_extver = None
if _split[0].isdigit():
_extname = None
_extnum = int(_split[0])
else:
_extname = _split[0]
if len(_split) > 1:
_extver = int(_split[1])
else:
_extver = 1
f = openImage(_root)
f.close()
if _extnum is not None:
if _extnum < len(f):
found = yes
del f
continue
else:
del f
else:
_fext = findExtname(f, _extname, extver=_extver)
if _fext is not None:
found = yes
del f
continue
return found
|
Checks to see if file specified exists in current or specified directory.
Default is current directory. Returns 1 if it exists, 0 if not found.
def checkFileExists(filename, directory=None):
"""
Checks to see if file specified exists in current or specified directory.
Default is current directory. Returns 1 if it exists, 0 if not found.
"""
if directory is not None:
fname = os.path.join(directory,filename)
else:
fname = filename
_exist = os.path.exists(fname)
return _exist
|
Copy a file whole from input to output.
def copyFile(input, output, replace=None):
"""Copy a file whole from input to output."""
_found = findFile(output)
if not _found or (_found and replace):
shutil.copy2(input, output)
|
Utility function for deleting a list of files or a single file.
This function will automatically delete both files of a GEIS image, just
like 'iraf.imdelete'.
def removeFile(inlist):
"""
Utility function for deleting a list of files or a single file.
This function will automatically delete both files of a GEIS image, just
like 'iraf.imdelete'.
"""
if not isinstance(inlist, string_types):
# We do have a list, so delete all filenames in list.
# Treat like a list of full filenames
_ldir = os.listdir('.')
for f in inlist:
# Now, check to see if there are wildcards which need to be expanded
if f.find('*') >= 0 or f.find('?') >= 0:
# We have a wild card specification
regpatt = f.replace('?', '.?')
regpatt = regpatt.replace('*', '.*')
_reg = re.compile(regpatt)
for file in _ldir:
if _reg.match(file):
_remove(file)
else:
# This is just a single filename
_remove(f)
else:
# It must be a string then, so treat as a single filename
_remove(inlist)
|
This function will return the index of the extension in a multi-extension
FITS file which contains the desired keyword with the given value.
def findKeywordExtn(ft, keyword, value=None):
"""
This function will return the index of the extension in a multi-extension
FITS file which contains the desired keyword with the given value.
"""
i = 0
extnum = -1
# Search through all the extensions in the FITS object
for chip in ft:
hdr = chip.header
# Check to make sure the extension has the given keyword
if keyword in hdr:
if value is not None:
# If it does, then does the value match the desired value
# MUST use 'str.strip' to match against any input string!
if hdr[keyword].strip() == value:
extnum = i
break
else:
extnum = i
break
i += 1
# Return the index of the extension which contained the
# desired EXTNAME value.
return extnum
|
Returns the list number of the extension corresponding to EXTNAME given.
def findExtname(fimg, extname, extver=None):
"""
Returns the list number of the extension corresponding to EXTNAME given.
"""
i = 0
extnum = None
for chip in fimg:
hdr = chip.header
if 'EXTNAME' in hdr:
if hdr['EXTNAME'].strip() == extname.upper():
if extver is None or hdr['EXTVER'] == extver:
extnum = i
break
i += 1
return extnum
|
Returns the next non-blank line in an ASCII file.
def rAsciiLine(ifile):
"""Returns the next non-blank line in an ASCII file."""
_line = ifile.readline().strip()
while len(_line) == 0:
_line = ifile.readline().strip()
return _line
|
List IRAF variables.
def listVars(prefix="", equals="\t= ", **kw):
"""List IRAF variables."""
keylist = getVarList()
if len(keylist) == 0:
print('No IRAF variables defined')
else:
keylist.sort()
for word in keylist:
print("%s%s%s%s" % (prefix, word, equals, envget(word)))
|
Undo Python conversion of CL parameter or variable name.
def untranslateName(s):
"""Undo Python conversion of CL parameter or variable name."""
s = s.replace('DOT', '.')
s = s.replace('DOLLAR', '$')
# delete 'PY' at start of name components
if s[:2] == 'PY': s = s[2:]
s = s.replace('.PY', '.')
return s
|
Get value of IRAF or OS environment variable.
def envget(var, default=None):
"""Get value of IRAF or OS environment variable."""
if 'pyraf' in sys.modules:
#ONLY if pyraf is already loaded, import iraf into the namespace
from pyraf import iraf
else:
# else set iraf to None so it knows to not use iraf's environment
iraf = None
try:
if iraf:
return iraf.envget(var)
else:
raise KeyError
except KeyError:
try:
return _varDict[var]
except KeyError:
try:
return os.environ[var]
except KeyError:
if default is not None:
return default
elif var == 'TERM':
# Return a default value for TERM
# TERM gets caught as it is found in the default
# login.cl file setup by IRAF.
print("Using default TERM value for session.")
return 'xterm'
else:
raise KeyError("Undefined environment variable `%s'" % var)
|
Convert IRAF virtual path name to OS pathname.
def osfn(filename):
"""Convert IRAF virtual path name to OS pathname."""
# Try to emulate the CL version closely:
#
# - expands IRAF virtual file names
# - strips blanks around path components
# - if no slashes or relative paths, return relative pathname
# - otherwise return absolute pathname
if filename is None:
return filename
ename = Expand(filename)
dlist = [part.strip() for part in ename.split(os.sep)]
if len(dlist) == 1 and dlist[0] not in [os.curdir, os.pardir]:
return dlist[0]
# I use str.join instead of os.path.join here because
# os.path.join("","") returns "" instead of "/"
epath = os.sep.join(dlist)
fname = os.path.abspath(epath)
# append '/' if relative directory was at end or filename ends with '/'
if fname[-1] != os.sep and dlist[-1] in ['', os.curdir, os.pardir]:
fname = fname + os.sep
return fname
|
Returns true if CL variable is defined.
def defvar(varname):
"""Returns true if CL variable is defined."""
if 'pyraf' in sys.modules:
#ONLY if pyraf is already loaded, import iraf into the namespace
from pyraf import iraf
else:
# else set iraf to None so it knows to not use iraf's environment
iraf = None
if iraf:
_irafdef = iraf.envget(varname)
else:
_irafdef = 0
return varname in _varDict or varname in os.environ or _irafdef
|
Set IRAF environment variables.
def set(*args, **kw):
"""Set IRAF environment variables."""
if len(args) == 0:
if len(kw) != 0:
# normal case is only keyword,value pairs
for keyword, value in kw.items():
keyword = untranslateName(keyword)
svalue = str(value)
_varDict[keyword] = svalue
else:
# set with no arguments lists all variables (using same format
# as IRAF)
listVars(prefix=" ", equals="=")
else:
# The only other case allowed is the peculiar syntax
# 'set @filename', which only gets used in the zzsetenv.def file,
# where it reads extern.pkg. That file also gets read (in full cl
# mode) by clpackage.cl. I get errors if I read this during
# zzsetenv.def, so just ignore it here...
#
# Flag any other syntax as an error.
if (len(args) != 1 or len(kw) != 0 or
not isinstance(args[0], string_types) or args[0][:1] != '@'):
raise SyntaxError("set requires name=value pairs")
|
Print value of IRAF or OS environment variables.
def show(*args, **kw):
"""Print value of IRAF or OS environment variables."""
if len(kw):
raise TypeError('unexpected keyword argument: %r' % list(kw))
if args:
for arg in args:
print(envget(arg))
else:
# print them all
listVars(prefix=" ", equals="=")
|
Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables.
def unset(*args, **kw):
"""
Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables.
"""
if len(kw) != 0:
raise SyntaxError("unset requires a list of variable names")
for arg in args:
if arg in _varDict:
del _varDict[arg]
|
Expand a string with embedded IRAF variables (IRAF virtual filename).
Allows comma-separated lists. Also uses os.path.expanduser to replace '~'
symbols.
Set the noerror flag to silently replace undefined variables with just the
variable name or null (so Expand('abc$def') = 'abcdef' and
Expand('(abc)def') = 'def'). This is the IRAF behavior, though it is
confusing and hides errors.
def Expand(instring, noerror=0):
"""
Expand a string with embedded IRAF variables (IRAF virtual filename).
Allows comma-separated lists. Also uses os.path.expanduser to replace '~'
symbols.
Set the noerror flag to silently replace undefined variables with just the
variable name or null (so Expand('abc$def') = 'abcdef' and
Expand('(abc)def') = 'def'). This is the IRAF behavior, though it is
confusing and hides errors.
"""
# call _expand1 for each entry in comma-separated list
wordlist = instring.split(",")
outlist = []
for word in wordlist:
outlist.append(os.path.expanduser(_expand1(word, noerror=noerror)))
return ",".join(outlist)
|
Expand a string with embedded IRAF variables (IRAF virtual filename).
def _expand1(instring, noerror):
"""Expand a string with embedded IRAF variables (IRAF virtual filename)."""
# first expand names in parentheses
# note this works on nested names too, expanding from the
# inside out (just like IRAF)
mm = __re_var_paren.search(instring)
while mm is not None:
# remove embedded dollar signs from name
varname = mm.group('varname').replace('$','')
if defvar(varname):
varname = envget(varname)
elif noerror:
varname = ""
else:
raise ValueError("Undefined variable `%s' in string `%s'" %
(varname, instring))
instring = instring[:mm.start()] + varname + instring[mm.end():]
mm = __re_var_paren.search(instring)
# now expand variable name at start of string
mm = __re_var_match.match(instring)
if mm is None:
return instring
varname = mm.group('varname')
if varname in ['', ' ', None]:
mm = __re_var_match2.match(instring)
varname = mm.group('varname')
if defvar(varname):
# recursively expand string after substitution
return _expand1(envget(varname) + instring[mm.end():], noerror)
elif noerror:
return _expand1(varname + instring[mm.end():], noerror)
else:
raise ValueError("Undefined variable `%s' in string `%s'" %
(varname, instring))
|
Check if this is a legal date in the Julian calendar
def legal_date(year, month, day):
'''Check if this is a legal date in the Julian calendar'''
daysinmonth = month_length(year, month)
if not (0 < day <= daysinmonth):
raise ValueError("Month {} doesn't have a day {}".format(month, day))
return True
|
Calculate Julian calendar date from Julian day
def from_jd(jd):
'''Calculate Julian calendar date from Julian day'''
jd += 0.5
z = trunc(jd)
a = z
b = a + 1524
c = trunc((b - 122.1) / 365.25)
d = trunc(365.25 * c)
e = trunc((b - d) / 30.6001)
    if e < 14:
month = e - 1
else:
month = e - 13
    if month > 2:
year = c - 4716
else:
year = c - 4715
day = b - d - trunc(30.6001 * e)
return (year, month, day)
|
Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)
def to_jd(year, month, day):
'''Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)'''
legal_date(year, month, day)
# Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61
if month <= 2:
year -= 1
month += 12
return (trunc((365.25 * (year + 4716))) + trunc((30.6001 * (month + 1))) + day) - 1524.5
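A hedged round-trip check, assuming to_jd/from_jd above (and their trunc and
month_length helpers) are in scope; the date is arbitrary and given in the Julian calendar:
jd = to_jd(2000, 1, 1)
print(jd)             # 2451557.5
print(from_jd(jd))    # (2000, 1, 1)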
|
Test for delay of start of new year, to avoid Sunday, Wednesday, and Friday
as the start of the new year
def delay_1(year):
    '''Test for delay of start of new year, to avoid Sunday, Wednesday,
    and Friday as the start of the new year.'''
months = trunc(((235 * year) - 234) / 19)
parts = 12084 + (13753 * months)
day = trunc((months * 29) + parts / 25920)
if ((3 * (day + 1)) % 7) < 3:
day += 1
return day
|
Check for delay in start of new year due to length of adjacent years
def delay_2(year):
'''Check for delay in start of new year due to length of adjacent years'''
last = delay_1(year - 1)
present = delay_1(year)
next_ = delay_1(year + 1)
if next_ - present == 356:
return 2
elif present - last == 382:
return 1
else:
return 0
|
How many days are in a given month of a given year
def month_days(year, month):
'''How many days are in a given month of a given year'''
if month > 13:
raise ValueError("Incorrect month index")
# First of all, dispose of fixed-length 29 day months
if month in (IYYAR, TAMMUZ, ELUL, TEVETH, VEADAR):
return 29
# If it's not a leap year, Adar has 29 days
if month == ADAR and not leap(year):
return 29
# If it's Heshvan, days depend on length of year
if month == HESHVAN and (year_days(year) % 10) != 5:
return 29
# Similarly, Kislev varies with the length of year
if month == KISLEV and (year_days(year) % 10) == 3:
return 29
# Nope, it's a 30 day month
return 30
|
Input GEIS files "input" will be read and converted to a new GEIS file
whose byte-order has been swapped from its original state.
Parameters
----------
input - str
Full filename with path of input GEIS image header file
output - str
Full filename with path of output GEIS image header file
If None, a default name will be created as input_swap.??h
clobber - bool
Overwrite any pre-existing output file? [Default: True]
Notes
-----
This function will automatically read and write out the data file using the
GEIS image naming conventions.
def byteswap(input,output=None,clobber=True):
"""Input GEIS files "input" will be read and converted to a new GEIS file
whose byte-order has been swapped from its original state.
Parameters
----------
input - str
Full filename with path of input GEIS image header file
output - str
Full filename with path of output GEIS image header file
If None, a default name will be created as input_swap.??h
clobber - bool
Overwrite any pre-existing output file? [Default: True]
Notes
-----
This function will automatically read and write out the data file using the
GEIS image naming conventions.
"""
global dat
cardLen = fits.Card.length
# input file(s) must be of the form *.??h and *.??d
if input[-1] != 'h' or input[-4] != '.':
        raise ValueError("Illegal input GEIS file name %s" % input)
data_file = input[:-1]+'d'
# Create default output name if no output name was specified by the user
if output is None:
output = input.replace('.','_swap.')
out_data = output[:-1]+'d'
if os.path.exists(output) and not clobber:
errstr = 'Output file already exists! Please remove or rename and start again...'
raise IOError(errstr)
_os = sys.platform
if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
bytes_per_line = cardLen+1
else:
        raise ValueError("Platform %s is not supported (yet)." % _os)
end_card = 'END'+' '* (cardLen-3)
# open input file
im = open(input)
# Generate the primary HDU so we can have access to keywords which describe
# the number of groups and shape of each group's array
#
cards = []
while 1:
line = im.read(bytes_per_line)[:cardLen]
line = line[:8].upper() + line[8:]
if line == end_card:
break
cards.append(fits.Card.fromstring(line))
phdr = fits.Header(cards)
im.close()
_naxis0 = phdr.get('NAXIS', 0)
_naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
_naxis.insert(0, _naxis0)
_bitpix = phdr['BITPIX']
_psize = phdr['PSIZE']
if phdr['DATATYPE'][:4] == 'REAL':
_bitpix = -_bitpix
if _naxis0 > 0:
size = reduce(lambda x,y:x*y, _naxis[1:])
data_size = abs(_bitpix) * size // 8
else:
data_size = 0
group_size = data_size + _psize // 8
# decode the group parameter definitions,
# group parameters will become extension header
groups = phdr['GROUPS']
gcount = phdr['GCOUNT']
pcount = phdr['PCOUNT']
formats = []
bools = []
floats = []
_range = list(range(1, pcount+1))
key = [phdr['PTYPE'+str(j)] for j in _range]
comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
# delete group parameter definition header keywords
_list = ['PTYPE'+str(j) for j in _range] + \
['PDTYPE'+str(j) for j in _range] + \
['PSIZE'+str(j) for j in _range] + \
['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
# Construct record array formats for the group parameters
# as interpreted from the Primary header file
for i in range(1, pcount+1):
ptype = key[i-1]
pdtype = phdr['PDTYPE'+str(i)]
star = pdtype.find('*')
_type = pdtype[:star]
_bytes = pdtype[star+1:]
# collect boolean keywords since they need special attention later
if _type == 'LOGICAL':
bools.append(i)
if pdtype == 'REAL*4':
floats.append(i)
fmt = geis_fmt[_type] + _bytes
formats.append((ptype,fmt))
_shape = _naxis[1:]
_shape.reverse()
_code = fits.BITPIX2DTYPE[_bitpix]
_bscale = phdr.get('BSCALE', 1)
_bzero = phdr.get('BZERO', 0)
if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
_uint16 = 1
_bzero = 32768
else:
_uint16 = 0
# Use copy-on-write for all data types since byteswap may be needed
# in some platforms.
f1 = open(data_file, mode='rb')
dat = f1.read()
f1.close()
errormsg = ""
loc = 0
outdat = b''
for k in range(gcount):
ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
ext_dat = ext_dat.reshape(_shape).byteswap()
outdat += ext_dat.tostring()
ext_hdu = fits.hdu.ImageHDU(data=ext_dat)
rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats).byteswap()
outdat += rec.tostring()
loc += group_size
if os.path.exists(output):
os.remove(output)
if os.path.exists(out_data):
os.remove(out_data)
shutil.copy(input,output)
outfile = open(out_data,mode='wb')
outfile.write(outdat)
outfile.close()
print('Finished byte-swapping ',input,' to ',output)
#-------------------------------------------------------------------------------
"""Input GEIS files "input" will be read and a HDUList object will
be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.
The user can use the writeto method to write the HDUList object to
a FITS file.
"""
    # global dat  # NOTE: the original 'def' line for this function is missing from this snippet
cardLen = fits.Card.length
# input file(s) must be of the form *.??h and *.??d
if input[-1] != 'h' or input[-4] != '.':
        raise ValueError("Illegal input GEIS file name %s" % input)
data_file = input[:-1]+'d'
_os = sys.platform
if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
bytes_per_line = cardLen+1
else:
        raise ValueError("Platform %s is not supported (yet)." % _os)
end_card = 'END'+' '* (cardLen-3)
# open input file
im = open(input)
# Generate the primary HDU
cards = []
while 1:
line = im.read(bytes_per_line)[:cardLen]
line = line[:8].upper() + line[8:]
if line == end_card:
break
cards.append(fits.Card.fromstring(line))
phdr = fits.Header(cards)
im.close()
phdr.set('FILENAME', value=input, after='DATE')
# Determine starting point for adding Group Parameter Block keywords to Primary header
phdr_indx = phdr.index('PSIZE')
_naxis0 = phdr.get('NAXIS', 0)
_naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
_naxis.insert(0, _naxis0)
_bitpix = phdr['BITPIX']
_psize = phdr['PSIZE']
if phdr['DATATYPE'][:4] == 'REAL':
_bitpix = -_bitpix
if _naxis0 > 0:
size = reduce(lambda x,y:x*y, _naxis[1:])
data_size = abs(_bitpix) * size // 8
else:
data_size = 0
group_size = data_size + _psize // 8
# decode the group parameter definitions,
# group parameters will become extension table
groups = phdr['GROUPS']
gcount = phdr['GCOUNT']
pcount = phdr['PCOUNT']
formats = []
bools = []
floats = []
cols = [] # column definitions used for extension table
cols_dict = {} # provides name access to Column defs
_range = list(range(1, pcount+1))
key = [phdr['PTYPE'+str(j)] for j in _range]
comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
# delete group parameter definition header keywords
_list = ['PTYPE'+str(j) for j in _range] + \
['PDTYPE'+str(j) for j in _range] + \
['PSIZE'+str(j) for j in _range] + \
['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
# Construct record array formats for the group parameters
# as interpreted from the Primary header file
for i in range(1, pcount+1):
ptype = key[i-1]
pdtype = phdr['PDTYPE'+str(i)]
star = pdtype.find('*')
_type = pdtype[:star]
_bytes = pdtype[star+1:]
# collect boolean keywords since they need special attention later
if _type == 'LOGICAL':
bools.append(i)
if pdtype == 'REAL*4':
floats.append(i)
# identify keywords which require conversion to special units
if ptype in kw_DOUBLE:
_type = 'DOUBLE'
fmt = geis_fmt[_type] + _bytes
formats.append((ptype,fmt))
# Set up definitions for use in creating the group-parameter block table
nrpt = ''
nbits = str(int(_bytes)*8)
if 'CHAR' in _type:
nrpt = _bytes
nbits = _bytes
afmt = cols_fmt[_type]+ nbits
if 'LOGICAL' in _type:
afmt = cols_fmt[_type]
cfmt = cols_pfmt[_type]+nrpt
#print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
cols.append(cols_dict[ptype]) # This keeps the columns in order
_shape = _naxis[1:]
_shape.reverse()
_code = fits.BITPIX2DTYPE[_bitpix]
_bscale = phdr.get('BSCALE', 1)
_bzero = phdr.get('BZERO', 0)
if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
_uint16 = 1
_bzero = 32768
else:
_uint16 = 0
# delete from the end, so it will not conflict with previous delete
for i in range(len(phdr)-1, -1, -1):
if phdr.cards[i].keyword in _list:
del phdr[i]
# clean up other primary header keywords
phdr['SIMPLE'] = True
phdr['GROUPS'] = False
_after = 'NAXIS'
if _naxis0 > 0:
_after += str(_naxis0)
phdr.set('EXTEND', value=True,
comment="FITS dataset may contain extensions",
after=_after)
# Use copy-on-write for all data types since byteswap may be needed
# in some platforms.
f1 = open(data_file, mode='rb')
dat = f1.read()
errormsg = ""
# Define data array for all groups
arr_shape = _naxis[:]
arr_shape[0] = gcount
arr_stack = numpy.zeros(arr_shape,dtype=_code)
loc = 0
for k in range(gcount):
ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
ext_dat = ext_dat.reshape(_shape)
if _uint16:
ext_dat += _bzero
# Check to see whether there are any NaN's or infs which might indicate
# a byte-swapping problem, such as being written out on little-endian
# and being read in on big-endian or vice-versa.
if _code.find('float') >= 0 and \
(numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
errormsg += "===================================\n"
errormsg += "= WARNING: =\n"
errormsg += "= Input image: =\n"
errormsg += input+"[%d]\n"%(k+1)
errormsg += "= had floating point data values =\n"
errormsg += "= of NaN and/or Inf. =\n"
errormsg += "===================================\n"
elif _code.find('int') >= 0:
# Check INT data for max values
ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
if ext_dat_exp.max() == int(_bitpix) - 1:
# Potential problems with byteswapping
errormsg += "===================================\n"
errormsg += "= WARNING: =\n"
errormsg += "= Input image: =\n"
errormsg += input+"[%d]\n"%(k+1)
errormsg += "= had integer data values =\n"
errormsg += "= with maximum bitvalues. =\n"
errormsg += "===================================\n"
arr_stack[k] = ext_dat
rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)
loc += group_size
# Add data from this GPB to table
for i in range(1, pcount+1):
val = rec[0][i-1]
if i in bools:
if val:
val = 'T'
else:
val = 'F'
cols[i-1].array[k] = val
# Based on the first group, add GPB keywords to PRIMARY header
if k == 0:
# Create separate PyFITS Card objects for each entry in 'rec'
# and update Primary HDU with these keywords after PSIZE
for i in range(1, pcount+1):
#val = rec.field(i-1)[0]
val = rec[0][i-1]
if val.dtype.kind == 'S':
val = val.decode('ascii')
if i in bools:
if val:
val = True
else:
val = False
if i in floats:
# use fromstring, format in Card is deprecated in pyfits 0.9
_str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
_card = fits.Card.fromstring(_str)
else:
_card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])
phdr.insert(phdr_indx+i, _card)
# deal with bscale/bzero
if (_bscale != 1 or _bzero != 0):
phdr['BSCALE'] = _bscale
phdr['BZERO'] = _bzero
#hdulist.append(ext_hdu)
# Define new table based on Column definitions
ext_table = fits.TableHDU.from_columns(cols)
ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
# Add column descriptions to header of table extension to match stwfits output
for i in range(len(key)):
ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))
if errormsg != "":
errormsg += "===================================\n"
errormsg += "= This file may have been =\n"
errormsg += "= written out on a platform =\n"
errormsg += "= with a different byte-order. =\n"
errormsg += "= =\n"
errormsg += "= Please verify that the values =\n"
errormsg += "= are correct or apply the =\n"
errormsg += "= '.byteswap()' method. =\n"
errormsg += "===================================\n"
print(errormsg)
f1.close()
hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
hdulist.append(ext_table)
return hdulist
|
Initialises the device if required then enters a read loop taking data from the provider and passing it to the
handler. It will continue until either breakRead is true or the duration (if provided) has passed.
:return:
def start(self, measurementId, durationInSeconds=None):
"""
Initialises the device if required then enters a read loop taking data from the provider and passing it to the
handler. It will continue until either breakRead is true or the duration (if provided) has passed.
:return:
"""
logger.info(">> measurement " + measurementId +
((" for " + str(durationInSeconds)) if durationInSeconds is not None else " until break"))
self.failureCode = None
self.measurementOverflowed = False
self.dataHandler.start(measurementId)
self.breakRead = False
self.startTime = time.time()
self.doInit()
# this must follow doInit because doInit sets status to INITIALISED
self.status = RecordingDeviceStatus.RECORDING
elapsedTime = 0
try:
self._sampleIdx = 0
while True:
logger.debug(measurementId + " provideData ")
self.dataHandler.handle(self.provideData())
elapsedTime = time.time() - self.startTime
if self.breakRead or durationInSeconds is not None and elapsedTime > durationInSeconds:
logger.debug(measurementId + " breaking provideData")
self.startTime = 0
break
except:
self.status = RecordingDeviceStatus.FAILED
self.failureCode = str(sys.exc_info())
logger.exception(measurementId + " failed")
finally:
expectedSamples = self.fs * (durationInSeconds if durationInSeconds is not None else elapsedTime)
if self._sampleIdx < expectedSamples:
self.status = RecordingDeviceStatus.FAILED
self.failureCode = "Insufficient samples " + str(self._sampleIdx) + " for " + \
str(elapsedTime) + " second long run, expected " + str(expectedSamples)
self._sampleIdx = 0
if self.measurementOverflowed:
self.status = RecordingDeviceStatus.FAILED
self.failureCode = "Measurement overflow detected"
if self.status == RecordingDeviceStatus.FAILED:
logger.error("<< measurement " + measurementId + " - FAILED - " + self.failureCode)
else:
self.status = RecordingDeviceStatus.INITIALISED
logger.info("<< measurement " + measurementId + " - " + self.status.name)
self.dataHandler.stop(measurementId, self.failureCode)
if self.status == RecordingDeviceStatus.FAILED:
logger.warning("Reinitialising device after measurement failure")
self.doInit()
|
Yields the analysed wav data.
:param targetId:
:return:
def get(self, targetId):
"""
Yields the analysed wav data.
:param targetId:
:return:
"""
result = self._targetController.analyse(targetId)
if result:
if len(result) == 2:
if result[1] == 404:
return result
else:
return {'name': targetId, 'data': self._jsonify(result)}, 200
else:
return None, 404
else:
return None, 500
|
stores a new target.
:param targetId: the target to store.
:return:
def put(self, targetId):
"""
stores a new target.
:param targetId: the target to store.
:return:
"""
json = request.get_json()
if 'hinge' in json:
logger.info('Storing target ' + targetId)
if self._targetController.storeFromHinge(targetId, json['hinge']):
logger.info('Stored target ' + targetId)
return None, 200
else:
return None, 500
else:
return None, 400
|
Return a datetime for the input floating point Julian Day Count
def to_datetime(jdc):
'''Return a datetime for the input floating point Julian Day Count'''
year, month, day = gregorian.from_jd(jdc)
# in jdc: 0.0 = noon, 0.5 = midnight
# the 0.5 changes it to 0.0 = midnight, 0.5 = noon
frac = (jdc + 0.5) % 1
hours = int(24 * frac)
mfrac = frac * 24 - hours
mins = int(60 * round(mfrac, 6))
sfrac = mfrac * 60 - mins
secs = int(60 * round(sfrac, 6))
msfrac = sfrac * 60 - secs
# down to ms, which are 1/1000 of a second
ms = int(1000 * round(msfrac, 6))
return datetime(year, month, day, int(hours), int(mins), int(secs), int(ms), tzinfo=utc)
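A quick doctest-style sanity check, assuming the module's own gregorian.from_jd helper returns an integer calendar date (as the datetime call above requires) and utc is the module's tzinfo; JD 2451545.0 is the J2000.0 epoch, i.e. 2000-01-01 12:00 UTC:
>>> dt = to_datetime(2451545.0)   # J2000.0 epoch
>>> (dt.year, dt.month, dt.day, dt.hour, dt.minute)
(2000, 1, 1, 12, 0)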
|
Slightly introverted parser for lists of dot-notation nested fields
i.e. "period.di,period.fhr" => {"period": {"di": {}, "fhr": {}}}
def dict_from_qs(qs):
''' Slightly introverted parser for lists of dot-notation nested fields
i.e. "period.di,period.fhr" => {"period": {"di": {}, "fhr": {}}}
'''
entries = qs.split(',') if qs.strip() else []
entries = [entry.strip() for entry in entries]
def _dict_from_qs(line, d):
if '.' in line:
key, value = line.split('.', 1)
d.setdefault(key, {})
return _dict_from_qs(value, d[key])
else:
d[line] = {}
def _default():
return defaultdict(_default)
d = defaultdict(_default)
for line in entries:
_dict_from_qs(line, d)
return d
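A minimal sketch of the parsing described above; the return value is a nested defaultdict, so only its contents are compared here:
>>> dict_from_qs("period.di,period.fhr")["period"] == {"di": {}, "fhr": {}}
True
>>> sorted(dict_from_qs("a,b.c"))
['a', 'b']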
|
Same as dict_from_qs, but in reverse
i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
def qs_from_dict(qsdict, prefix=""):
''' Same as dict_from_qs, but in reverse
i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
'''
prefix = prefix + '.' if prefix else ""
def descend(qsd):
for key, val in sorted(qsd.items()):
if val:
yield qs_from_dict(val, prefix + key)
else:
yield prefix + key
return ",".join(descend(qsdict))
|
Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created.
def dbcon(func):
"""Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.dbcon is None:
# set up connection
self.dbcon = sqlite3.connect(self.db)
self.dbcur = self.dbcon.cursor()
self.dbcur.execute(SQL_SENSOR_TABLE)
self.dbcur.execute(SQL_TMPO_TABLE)
# execute function
try:
result = func(*args, **kwargs)
except Exception as e:
# on exception, first close connection and then raise
self.dbcon.rollback()
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
raise e
else:
# commit everything and close connection
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
else:
result = func(*args, **kwargs)
return result
return wrapper
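A hypothetical sketch of how the decorator is applied; the attribute names (db, dbcon, dbcur) are the ones the wrapper expects, and SQL_SENSOR_INS is the module's own statement:
class Store:
    def __init__(self, path=":memory:"):
        self.db = path      # sqlite3 database path
        self.dbcon = None   # no open connection yet
        self.dbcur = None

    @dbcon
    def add_sensor(self, sid, token):
        # runs with self.dbcur bound to an open cursor; the decorator
        # commits and closes the connection when the call returns
        self.dbcur.execute(SQL_SENSOR_INS, (sid, token))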
|
Add new sensor to the database
Parameters
----------
sid : str
SensorId
token : str
def add(self, sid, token):
"""
Add new sensor to the database
Parameters
----------
sid : str
SensorId
token : str
"""
try:
self.dbcur.execute(SQL_SENSOR_INS, (sid, token))
except sqlite3.IntegrityError: # sensor entry exists
pass
|
Remove sensor from the database
Parameters
----------
sid : str
SensorID
def remove(self, sid):
"""
Remove sensor from the database
Parameters
----------
sid : str
SensorID
"""
self.dbcur.execute(SQL_SENSOR_DEL, (sid,))
self.dbcur.execute(SQL_TMPO_DEL, (sid,))
|
Synchronise data
Parameters
----------
sids : list of str
SensorIDs to sync
Optional, leave empty to sync everything
def sync(self, *sids):
"""
Synchronise data
Parameters
----------
sids : list of str
SensorIDs to sync
Optional, leave empty to sync everything
"""
if sids == ():
sids = [sid for (sid,) in self.dbcur.execute(SQL_SENSOR_ALL)]
for sid in sids:
self.dbcur.execute(SQL_TMPO_LAST, (sid,))
last = self.dbcur.fetchone()
if last:
rid, lvl, bid, ext = last
self._clean(sid, rid, lvl, bid)
# prevent needless polling
if time.time() < bid + 256:
return
else:
rid, lvl, bid = 0, 0, 0
self._req_sync(sid, rid, lvl, bid)
|
List all tmpo-blocks in the database
Parameters
----------
sids : list of str
SensorID's for which to list blocks
Optional, leave empty to get them all
Returns
-------
list[list[tuple]]
def list(self, *sids):
"""
List all tmpo-blocks in the database
Parameters
----------
sids : list of str
SensorID's for which to list blocks
Optional, leave empty to get them all
Returns
-------
list[list[tuple]]
"""
if sids == ():
sids = [sid for (sid,) in self.dbcur.execute(SQL_SENSOR_ALL)]
slist = []
for sid in sids:
tlist = []
for tmpo in self.dbcur.execute(SQL_TMPO_ALL, (sid,)):
tlist.append(tmpo)
sid, rid, lvl, bid, ext, ctd, blk = tmpo
self._dprintf(DBG_TMPO_WRITE, ctd, sid, rid, lvl, bid, len(blk))
slist.append(tlist)
return slist
|
Create data Series
Parameters
----------
sid : str
recycle_id : optional
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.Series
def series(self, sid, recycle_id=None, head=None, tail=None,
datetime=True):
"""
Create data Series
Parameters
----------
sid : str
recycle_id : optional
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.Series
"""
if head is None:
head = 0
else:
head = self._2epochs(head)
if tail is None:
tail = EPOCHS_MAX
else:
tail = self._2epochs(tail)
if recycle_id is None:
self.dbcur.execute(SQL_TMPO_RID_MAX, (sid,))
recycle_id = self.dbcur.fetchone()[0]
tlist = self.list(sid)[0]
srlist = []
for _sid, rid, lvl, bid, ext, ctd, blk in tlist:
if (recycle_id == rid
and head < self._blocktail(lvl, bid)
and tail >= bid):
srlist.append(self._blk2series(ext, blk, head, tail))
if len(srlist) > 0:
ts = pd.concat(srlist)
ts.name = sid
if datetime is True:
ts.index = pd.to_datetime(ts.index, unit="s", utc=True)
return ts
else:
return pd.Series([], name=sid)
|
Create data frame
Parameters
----------
sids : list[str]
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.DataFrame
def dataframe(self, sids, head=0, tail=EPOCHS_MAX, datetime=True):
"""
Create data frame
Parameters
----------
sids : list[str]
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.DataFrame
"""
if head is None:
head = 0
else:
head = self._2epochs(head)
if tail is None:
tail = EPOCHS_MAX
else:
tail = self._2epochs(tail)
series = [self.series(sid, head=head, tail=tail, datetime=False)
for sid in sids]
df = pd.concat(series, axis=1)
if datetime is True:
df.index = pd.to_datetime(df.index, unit="s", utc=True)
return df
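A hedged usage sketch, assuming `session` is an instance of this class whose sensors have already been synced (the sensor IDs are hypothetical):
import pandas as pd

df = session.dataframe(
    ["sensor_a", "sensor_b"],                     # hypothetical sensor IDs
    head=pd.Timestamp("2020-01-01", tz="UTC"),    # interval bounds; Timestamps are accepted
    tail=pd.Timestamp("2020-01-02", tz="UTC"),
)
print(df.head())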
|
Get the first available timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
def first_timestamp(self, sid, epoch=False):
"""
Get the first available timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
first_block = self.dbcur.execute(SQL_TMPO_FIRST, (sid,)).fetchone()
if first_block is None:
return None
timestamp = first_block[2]
if not epoch:
timestamp = pd.Timestamp.utcfromtimestamp(timestamp)
timestamp = timestamp.tz_localize('UTC')
return timestamp
|
Get the theoretical last timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
def last_timestamp(self, sid, epoch=False):
"""
Get the theoretical last timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
timestamp, value = self.last_datapoint(sid, epoch)
return timestamp
|
Parameters
----------
sid : str
SensorId
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int, float
def last_datapoint(self, sid, epoch=False):
"""
Parameters
----------
sid : str
SensorId
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int, float
"""
block = self._last_block(sid)
if block is None:
return None, None
header = block['h']
timestamp, value = header['tail']
if not epoch:
timestamp = pd.Timestamp.utcfromtimestamp(timestamp)
timestamp = timestamp.tz_localize('UTC')
return timestamp, value
|
Numpy: Modifying Array Values
http://docs.scipy.org/doc/numpy/reference/arrays.nditer.html
def _npdelta(self, a, delta):
"""Numpy: Modifying Array Values
http://docs.scipy.org/doc/numpy/reference/arrays.nditer.html"""
for x in np.nditer(a, op_flags=["readwrite"]):
delta += x
x[...] = delta
return a
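In effect this is an in-place running sum offset by delta; a small illustration, with `obj` standing in for whatever instance owns the method:
>>> import numpy as np
>>> obj._npdelta(np.array([1, 2, 3]), 10)   # 10+1, 11+2, 13+3
array([11, 13, 16])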
|
Take a check function signature (string), and parse it to get a dict
of the keyword args and their values.
def sigStrToKwArgsDict(checkFuncSig):
""" Take a check function signature (string), and parse it to get a dict
of the keyword args and their values. """
p1 = checkFuncSig.find('(')
p2 = checkFuncSig.rfind(')')
assert p1 > 0 and p2 > 0 and p2 > p1, "Invalid signature: "+checkFuncSig
argParts = irafutils.csvSplit(checkFuncSig[p1+1:p2], ',', True)
argParts = [x.strip() for x in argParts]
retval = {}
for argPair in argParts:
argSpl = argPair.split('=', 1)
if len(argSpl) > 1:
if argSpl[0] in retval:
if isinstance(retval[argSpl[0]], (list,tuple)):
retval[argSpl[0]]+=(irafutils.stripQuotes(argSpl[1]),) # 3rd
else: # 2nd in, so convert to tuple
retval[argSpl[0]] = (retval[argSpl[0]],
irafutils.stripQuotes(argSpl[1]),)
else:
retval[argSpl[0]] = irafutils.stripQuotes(argSpl[1]) # 1st in
else:
retval[argSpl[0]] = None # eg. found "triggers=, max=6, ..."
return retval
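A rough sketch of the expected result for a hypothetical check-function signature, assuming irafutils.csvSplit and irafutils.stripQuotes behave as their names suggest:
>>> sigStrToKwArgsDict("boolean_kw(default=True, comment='turn it on')")
{'default': 'True', 'comment': 'turn it on'}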
|
Look through the keywords passed and separate the special ones we
have added from the legal/standard ones. Return both sets as two
dicts (in a tuple), as (standardKws, ourKws)
def separateKeywords(kwArgsDict):
""" Look through the keywords passed and separate the special ones we
have added from the legal/standard ones. Return both sets as two
dicts (in a tuple), as (standardKws, ourKws) """
standardKws = {}
ourKws = {}
for k in kwArgsDict:
if k in STANDARD_KEYS:
standardKws[k]=kwArgsDict[k]
else:
ourKws[k]=kwArgsDict[k]
return (standardKws, ourKws)
|
Alter the passed function signature string to add the given keywords
def addKwdArgsToSig(sigStr, kwArgsDict):
""" Alter the passed function signature string to add the given kewords """
retval = sigStr
if len(kwArgsDict) > 0:
retval = retval.strip(' ,)') # open up the r.h.s. for more args
for k in kwArgsDict:
if retval[-1] != '(': retval += ", "
retval += str(k)+"="+str(kwArgsDict[k])
retval += ')'
return retval
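For example, with a hypothetical signature string:
>>> addKwdArgsToSig("boolean_kw(default=True)", {"code_val": "'1'"})
"boolean_kw(default=True, code_val='1')"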
|
Defines the gaussian function to be used as the model.
def _gauss_funct(p, fjac=None, x=None, y=None, err=None,
weights=None):
"""
Defines the gaussian function to be used as the model.
"""
if p[2] != 0.0:
Z = (x - p[1]) / p[2]
model = p[0] * np.e ** (-Z ** 2 / 2.0)
else:
model = np.zeros(np.size(x))
status = 0
if weights is not None:
if err is not None:
print("Warning: Ignoring errors and using weights.\n")
return [status, (y - model) * weights]
elif err is not None:
return [status, (y - model) / err]
else:
return [status, y - model]
|
Return the gaussian fit as an object.
Parameters
----------
y: 1D Numpy array
The data to be fitted
x: 1D Numpy array
(optional) The x values of the y array. x and y must
have the same shape.
err: 1D Numpy array
(optional) 1D array with measurement errors, must be
the same shape as y
weights: 1D Numpy array
(optional) 1D array with weights, must be the same
shape as y
par: List
(optional) Starting values for the parameters to be fitted
parinfo: Dictionary of lists
(optional) provides additional information for the
parameters. For a detailed description see nmpfit.py.
Parinfo can be used to limit parameters or keep
some of them fixed.
maxiter: number
Maximum number of iterations to perform
Default: 200
quiet: number
if set to 1, nmpfit does not print to the screen
Default: 0
Examples
--------
>>> x = np.arange(10,20, 0.1)
>>> y= 10*np.e**(-(x-15)**2/4)
>>> print(gfit1d(y,x=x, maxiter=20,quiet=1).params)
[10. 15. 1.41421356]
def gfit1d(y, x=None, err=None, weights=None, par=None, parinfo=None,
maxiter=200, quiet=0):
"""
Return the gaussian fit as an object.
Parameters
----------
y: 1D Numpy array
The data to be fitted
x: 1D Numpy array
(optional) The x values of the y array. x and y must
have the same shape.
err: 1D Numpy array
(optional) 1D array with measurement errors, must be
the same shape as y
weights: 1D Numpy array
        (optional) 1D array with weights, must be the same
shape as y
par: List
(optional) Starting values for the parameters to be fitted
parinfo: Dictionary of lists
(optional) provides additional information for the
parameters. For a detailed description see nmpfit.py.
Parinfo can be used to limit parameters or keep
some of them fixed.
maxiter: number
Maximum number of iterations to perform
Default: 200
quiet: number
if set to 1, nmpfit does not print to the screen
Default: 0
Examples
--------
>>> x = np.arange(10,20, 0.1)
>>> y= 10*np.e**(-(x-15)**2/4)
>>> print(gfit1d(y,x=x, maxiter=20,quiet=1).params)
[10. 15. 1.41421356]
"""
    y = y.astype(float)
    if weights is not None:
        weights = weights.astype(float)
    if err is not None:
        err = err.astype(float)
    if x is None and len(y.shape) == 1:
        x = np.arange(len(y)).astype(float)
if x.shape != y.shape:
print("input arrays X and Y must be of equal shape.\n")
return
fa = {'x': x, 'y': y, 'err': err, 'weights': weights}
if par is not None:
p = par
else:
ysigma = y.std()
ind = np.nonzero(y > ysigma)[0]
if len(ind) != 0:
xind = int(ind.mean())
p2 = x[xind]
p1 = y[xind]
p3 = 1.0
else:
ymax = y.max()
ymin = y.min()
            ymean = y.mean()
            if (ymax - ymean) > (abs(ymin - ymean)):
                p1 = ymax
            else:
                p1 = ymin
ind = (np.nonzero(y == p1))[0]
p2 = x.mean()
p3 = 1.
p = [p1, p2, p3]
m = nmpfit.mpfit(_gauss_funct, p,parinfo = parinfo, functkw=fa,
maxiter=maxiter, quiet=quiet)
if (m.status <= 0): print('error message = ', m.errmsg)
return m
|
filter lets django managers use `objects.filter` on a hashable object.
def filter(self, *args, **kwargs):
"""filter lets django managers use `objects.filter` on a hashable object."""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
return super().filter(*args, **kwargs)
|
this method allows django managers to use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
def _extract_model_params(self, defaults, **kwargs):
"""this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
"""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
lookup, params = super()._extract_model_params(defaults, **kwargs)
if obj is not None:
params[self.object_property_name] = obj
del params['object_hash']
return lookup, params
|
a private method that persists an estimator object to the filesystem
def persist(self):
"""a private method that persists an estimator object to the filesystem"""
if self.object_hash:
data = dill.dumps(self.object_property)
f = ContentFile(data)
self.object_file.save(self.object_hash, f, save=False)
f.close()
self._persisted = True
return self._persisted
|
a private method that loads an estimator object from the filesystem
def load(self):
"""a private method that loads an estimator object from the filesystem"""
if self.is_file_persisted:
self.object_file.open()
temp = dill.loads(self.object_file.read())
self.set_object(temp)
self.object_file.close()
|
Return an Estimator object given the path of the file, relative to the MEDIA_ROOT
def create_from_file(cls, filename):
"""Return an Estimator object given the path of the file, relative to the MEDIA_ROOT"""
obj = cls()
obj.object_file = filename
obj.load()
return obj
|
Return our application dir. Create it if it doesn't exist.
def getAppDir():
""" Return our application dir. Create it if it doesn't exist. """
# Be sure the resource dir exists
theDir = os.path.expanduser('~/.')+APP_NAME.lower()
if not os.path.exists(theDir):
try:
os.mkdir(theDir)
except OSError:
print('Could not create "'+theDir+'" to save GUI settings.')
theDir = "./"+APP_NAME.lower()
return theDir
|
Take the arg (usually called theTask), which can be either a subclass
of ConfigObjPars, or a string package name, or a .cfg filename - no matter
what it is - take it and return a ConfigObjPars object.
strict - bool - warning severity, passed to the ConfigObjPars() ctor
setAllToDefaults - bool - if theTask is a pkg name, force all to defaults
def getObjectFromTaskArg(theTask, strict, setAllToDefaults):
""" Take the arg (usually called theTask), which can be either a subclass
of ConfigObjPars, or a string package name, or a .cfg filename - no matter
what it is - take it and return a ConfigObjPars object.
strict - bool - warning severity, passed to the ConfigObjPars() ctor
setAllToDefaults - bool - if theTask is a pkg name, force all to defaults
"""
# Already in the form we need (instance of us or of subclass)
if isinstance(theTask, ConfigObjPars):
if setAllToDefaults:
raise RuntimeError('Called getObjectFromTaskArg with existing'+\
' object AND setAllToDefaults - is unexpected use case.')
        # If it is an existing object, make sure its internal param list is
        # up to date with its ConfigObj dict, since the user may have manually
# edited the dict before calling us.
theTask.syncParamList(False) # use strict somehow?
# Note - some validation is done here in IrafPar creation, but it is
# not the same validation done by the ConfigObj s/w (no check funcs).
# Do we want to do that too here?
return theTask
# For example, a .cfg file
if os.path.isfile(str(theTask)):
try:
return ConfigObjPars(theTask, strict=strict,
setAllToDefaults=setAllToDefaults)
except KeyError:
# this might just be caused by a file sitting in the local cwd with
# the same exact name as the package we want to import, let's see
if theTask.find('.') > 0: # it has an extension, like '.cfg'
raise # this really was an error
# else we drop down to the next step - try it as a pkg name
# Else it must be a Python package name to load
if isinstance(theTask, str) and setAllToDefaults:
# NOTE how we pass the task name string in setAllToDefaults
return ConfigObjPars('', setAllToDefaults=theTask, strict=strict)
else:
return getParsObjForPyPkg(theTask, strict)
|
Read a config file and pull out the value of a given keyword.
def getEmbeddedKeyVal(cfgFileName, kwdName, dflt=None):
""" Read a config file and pull out the value of a given keyword. """
# Assume this is a ConfigObj file. Use that s/w to quickly read it and
# put it in dict format. Assume kwd is at top level (not in a section).
# The input may also be a .cfgspc file.
#
# Only use ConfigObj here as a tool to generate a dict from a file - do
# not use the returned object as a ConfigObj per se. As such, we can call
# with "simple" format, ie. no cfgspc, no val'n, and "list_values"=False.
try:
junkObj = configobj.ConfigObj(cfgFileName, list_values=False)
except:
if kwdName == TASK_NAME_KEY:
raise KeyError('Can not parse as a parameter config file: '+ \
'\n\t'+os.path.realpath(cfgFileName))
else:
raise KeyError('Unfound key "'+kwdName+'" while parsing: '+ \
'\n\t'+os.path.realpath(cfgFileName))
if kwdName in junkObj:
retval = junkObj[kwdName]
del junkObj
return retval
# Not found
if dflt is not None:
del junkObj
return dflt
else:
if kwdName == TASK_NAME_KEY:
raise KeyError('Can not parse as a parameter config file: '+ \
'\n\t'+os.path.realpath(cfgFileName))
else:
raise KeyError('Unfound key "'+kwdName+'" while parsing: '+ \
'\n\t'+os.path.realpath(cfgFileName))
|
Locate the configuration files for/from/within a given python package.
pkgName is a string python package name. This is used unless pkgObj
is given, in which case pkgName is taken from pkgObj.__name__.
theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is
given as taskName, otherwise one is determined using the pkgName.
Returns a tuple of (package-object, cfg-file-name).
def findCfgFileForPkg(pkgName, theExt, pkgObj=None, taskName=None):
""" Locate the configuration files for/from/within a given python package.
pkgName is a string python package name. This is used unless pkgObj
is given, in which case pkgName is taken from pkgObj.__name__.
theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is
given as taskName, otherwise one is determined using the pkgName.
Returns a tuple of (package-object, cfg-file-name). """
# arg check
ext = theExt
if ext[0] != '.': ext = '.'+theExt
# Do the import, if needed
pkgsToTry = {}
if pkgObj:
pkgsToTry[pkgObj.__name__] = pkgObj
else:
# First try something simple like a regular or dotted import
try:
fl = []
if pkgName.find('.') > 0:
fl = [ pkgName[:pkgName.rfind('.')], ]
pkgsToTry[str(pkgName)] = __import__(str(pkgName), fromlist=fl)
except:
throwIt = True
# One last case to try is something like "csc_kill" from
# "acstools.csc_kill", but this convenience capability will only be
# allowed if the parent pkg (acstools) has already been imported.
if isinstance(pkgName, string_types) and pkgName.find('.') < 0:
matches = [x for x in sys.modules.keys() \
if x.endswith("."+pkgName)]
if len(matches)>0:
throwIt = False
for mmm in matches:
pkgsToTry[mmm] = sys.modules[mmm]
if throwIt:
raise NoCfgFileError("Unfound package or "+ext+" file via: "+\
"import "+str(pkgName))
# Now that we have the package object (or a few of them to try), for each
# one find the .cfg or .cfgspc file, and return
# Return as soon as ANY match is found.
for aPkgName in pkgsToTry:
aPkg = pkgsToTry[aPkgName]
path = os.path.dirname(aPkg.__file__)
if len(path) < 1: path = '.'
flist = irafutils.rglob(path, "*"+ext)
if len(flist) < 1:
continue
# Go through these and find the first one for the assumed or given task
# name. The task name for 'BigBlackBox.drizzle' would be 'drizzle'.
if taskName is None:
taskName = aPkgName.split(".")[-1]
flist.sort()
for f in flist:
# A .cfg file gets checked for _task_name_=val, but a .cfgspc file
# will have a string check function signature as the val.
if ext == '.cfg':
itsTask = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
else: # .cfgspc
sigStr = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
# .cfgspc file MUST have an entry for TASK_NAME_KEY w/ a default
itsTask = vtor_checks.sigStrToKwArgsDict(sigStr)['default']
if itsTask == taskName:
# We've found the correct file in an installation area. Return
# the package object and the found file.
return aPkg, f
# What, are you still here?
raise NoCfgFileError('No valid '+ext+' files found in package: "'+ \
str(pkgName)+'" for task: "'+str(taskName)+'"')
|
Finds all installed tasks by examining any .cfg files found on disk
at and under the given directory, as an installation might be.
This returns a dict of { file name : task name }
def findAllCfgTasksUnderDir(aDir):
""" Finds all installed tasks by examining any .cfg files found on disk
at and under the given directory, as an installation might be.
This returns a dict of { file name : task name }
"""
retval = {}
for f in irafutils.rglob(aDir, '*.cfg'):
retval[f] = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
return retval
|
This is a specialized function which is meant only to keep the
same code from needlessly being much repeated throughout this
application. This must be kept as fast and as light as possible.
This checks a given directory for .cfg files matching a given
task. If recurse is True, it will check subdirectories.
If aTask is None, it returns all files and ignores aTask.
def getCfgFilesInDirForTask(aDir, aTask, recurse=False):
""" This is a specialized function which is meant only to keep the
same code from needlessly being much repeated throughout this
application. This must be kept as fast and as light as possible.
This checks a given directory for .cfg files matching a given
task. If recurse is True, it will check subdirectories.
If aTask is None, it returns all files and ignores aTask.
"""
if recurse:
flist = irafutils.rglob(aDir, '*.cfg')
else:
flist = glob.glob(aDir+os.sep+'*.cfg')
if aTask:
retval = []
for f in flist:
try:
if aTask == getEmbeddedKeyVal(f, TASK_NAME_KEY, ''):
retval.append(f)
except Exception as e:
print('Warning: '+str(e))
return retval
else:
return flist
|
Locate the appropriate ConfigObjPars (or subclass) within the given
package. NOTE this begins the same way as getUsrCfgFilesForPyPkg().
Look for .cfg file matches in these places, in this order:
1 - any named .cfg file in current directory matching given task
2 - if there exists a ~/.teal/<taskname>.cfg file
3 - any named .cfg file in SOME*ENV*VAR directory matching given task
4 - the installed default .cfg file (with the given package)
def getParsObjForPyPkg(pkgName, strict):
""" Locate the appropriate ConfigObjPars (or subclass) within the given
package. NOTE this begins the same way as getUsrCfgFilesForPyPkg().
Look for .cfg file matches in these places, in this order:
1 - any named .cfg file in current directory matching given task
2 - if there exists a ~/.teal/<taskname>.cfg file
3 - any named .cfg file in SOME*ENV*VAR directory matching given task
4 - the installed default .cfg file (with the given package)
"""
    # Get the python package and its .cfg file - need this no matter what
installedPkg, installedFile = findCfgFileForPkg(pkgName, '.cfg')
theFile = None
tname = getEmbeddedKeyVal(installedFile, TASK_NAME_KEY)
# See if the user has any of their own .cfg files in the cwd for this task
if theFile is None:
flist = getCfgFilesInDirForTask(os.getcwd(), tname)
if len(flist) > 0:
if len(flist) == 1: # can skip file times sort
theFile = flist[0]
else:
# There are a few different choices. In the absence of
# requirements to the contrary, just take the latest. Set up a
# list of tuples of (mtime, fname) so we can sort by mtime.
ftups = [ (os.stat(f)[stat.ST_MTIME], f) for f in flist]
ftups.sort()
theFile = ftups[-1][1]
# See if the user has any of their own app-dir .cfg files for this task
if theFile is None:
flist = getCfgFilesInDirForTask(getAppDir(), tname) # verifies tname
flist = [f for f in flist if os.path.basename(f) == tname+'.cfg']
if len(flist) > 0:
theFile = flist[0]
assert len(flist) == 1, str(flist) # should never happen
# Add code to check an env. var defined area? (speak to users first)
# Did we find one yet? If not, use the installed version
useInstVer = False
if theFile is None:
theFile = installedFile
useInstVer = True
# Create a stand-in instance from this file. Force a read-only situation
# if we are dealing with the installed, (expected to be) unwritable file.
return ConfigObjPars(theFile, associatedPkg=installedPkg,
forceReadOnly=useInstVer, strict=strict)
|
See if the user has one of their own local .cfg files for this task,
such as might be created automatically during the save of a read-only
package, and return their names.
def getUsrCfgFilesForPyPkg(pkgName):
""" See if the user has one of their own local .cfg files for this task,
such as might be created automatically during the save of a read-only
package, and return their names. """
    # Get the python package and its .cfg file
thePkg, theFile = findCfgFileForPkg(pkgName, '.cfg')
# See if the user has any of their own local .cfg files for this task
tname = getEmbeddedKeyVal(theFile, TASK_NAME_KEY)
flist = getCfgFilesInDirForTask(getAppDir(), tname)
return flist
|
See if we have write-privileges to this file. If we do, and we
are not supposed to, then fix that case.
def checkSetReadOnly(fname, raiseOnErr = False):
""" See if we have write-privileges to this file. If we do, and we
are not supposed to, then fix that case. """
if os.access(fname, os.W_OK):
# We can write to this but it is supposed to be read-only. Fix it.
# Take away usr-write, leave group and other alone, though it
# may be simpler to just force/set it to: r--r--r-- or r--------
irafutils.setWritePrivs(fname, False, ignoreErrors= not raiseOnErr)
|
Takes a dict of vals and dicts (so, a tree) as input, and returns
a flat dict (only one level) as output. All key-vals are moved to
the top level. Sub-section dict names (keys) are ignored/dropped.
If there are name collisions, an error is raised.
def flattenDictTree(aDict):
""" Takes a dict of vals and dicts (so, a tree) as input, and returns
a flat dict (only one level) as output. All key-vals are moved to
the top level. Sub-section dict names (keys) are ignored/dropped.
If there are name collisions, an error is raised. """
retval = {}
for k in aDict:
val = aDict[k]
if isinstance(val, dict):
# This val is a dict, get its data (recursively) into a flat dict
subDict = flattenDictTree(val)
# Merge its dict of data into ours, watching for NO collisions
rvKeySet = set(retval.keys())
sdKeySet = set(subDict.keys())
intr = rvKeySet.intersection(sdKeySet)
if len(intr) > 0:
raise DuplicateKeyError("Flattened dict already has "+ \
"key(s): "+str(list(intr))+" - cannot flatten this.")
else:
retval.update(subDict)
else:
if k in retval:
raise DuplicateKeyError("Flattened dict already has key: "+\
k+" - cannot flatten this.")
else:
retval[k] = val
return retval
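A small example of the flattening; a repeated key (say another 'a' inside the sub-section) would raise DuplicateKeyError instead:
>>> flattenDictTree({'a': 1, 'section': {'b': 2, 'c': 3}})
{'a': 1, 'b': 2, 'c': 3}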
|
Return the number of times the given par exists in this dict-tree,
since the same key name may be used in different sections/sub-sections.
def countKey(theDict, name):
""" Return the number of times the given par exists in this dict-tree,
since the same key name may be used in different sections/sub-sections. """
retval = 0
for key in theDict:
val = theDict[key]
if isinstance(val, dict):
retval += countKey(val, name) # recurse
else:
if key == name:
retval += 1
                # can't break, even though we found a hit; other items on
                # this level will not be named "name", but child dicts
                # may have further counts
return retval
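For instance, counting a key that appears both at the top level and inside a sub-section:
>>> countKey({'a': 1, 'sec1': {'a': 2}, 'sec2': {'b': 3}}, 'a')
2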
|
Find the given par. Return tuple: (its own (sub-)dict, its value).
Returns the first match found, without checking whether the given key name
is unique or whether it is used in multiple sections.
def findFirstPar(theDict, name, _depth=0):
""" Find the given par. Return tuple: (its own (sub-)dict, its value).
Returns the first match found, without checking whether the given key name
is unique or whether it is used in multiple sections. """
for key in theDict:
val = theDict[key]
# print _depth*' ', key, str(val)[:40]
if isinstance(val, dict):
retval = findFirstPar(val, name, _depth=_depth+1) # recurse
if retval is not None:
return retval
# else keep looking
else:
if key == name:
return theDict, theDict[name]
# else keep looking
# if we get here then we have searched this whole (sub)-section and its
# descendants, and found no matches. only raise if we are at the top.
if _depth == 0:
raise KeyError(name)
else:
return None
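A quick illustration; a name that is absent everywhere raises KeyError at the top level:
>>> findFirstPar({'sec': {'x': 5}, 'y': 6}, 'x')
({'x': 5}, 5)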
|
Find the given par. Return tuple: (its own (sub-)dict, its value).
def findScopedPar(theDict, scope, name):
""" Find the given par. Return tuple: (its own (sub-)dict, its value). """
# Do not search (like findFirstPar), but go right to the correct
# sub-section, and pick it up. Assume it is there as stated.
if len(scope):
theDict = theDict[scope] # ! only goes one level deep - enhance !
return theDict, theDict[name]
|
Sets a par's value without having to give its scope/section.
def setPar(theDict, name, value):
""" Sets a par's value without having to give its scope/section. """
section, previousVal = findFirstPar(theDict, name)
# "section" is the actual object, not a copy
section[name] = value
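Since the returned section is the live sub-dict, the assignment mutates the original tree in place:
>>> cfg = {'section1': {'maxiter': 100}}
>>> setPar(cfg, 'maxiter', 200)
>>> cfg
{'section1': {'maxiter': 200}}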
|
Merge the inputDict values into an existing given configObj instance.
The inputDict is a "flat" dict - it has no sections/sub-sections. The
configObj may have sub-sections nested to any depth. This will raise a
DuplicateKeyError if one of the inputDict keys is used more than once in
configObj (e.g. within two different sub-sections).
def mergeConfigObj(configObj, inputDict):
""" Merge the inputDict values into an existing given configObj instance.
The inputDict is a "flat" dict - it has no sections/sub-sections. The
configObj may have sub-sections nested to any depth. This will raise a
DuplicateKeyError if one of the inputDict keys is used more than once in
configObj (e.g. within two different sub-sections). """
# Expanded upon Warren's version in astrodrizzle
# Verify that all inputDict keys in configObj are unique within configObj
for key in inputDict:
if countKey(configObj, key) > 1:
raise DuplicateKeyError(key)
# Now update configObj with each inputDict item
for key in inputDict:
setPar(configObj, key, inputDict[key])
|
Find any lost/missing parameters in this cfg file, compared to what
the .cfgspc says should be there. This method is recommended by the
ConfigObj docs. Return a stringified list of item errors.
def findTheLost(config_file, configspec_file, skipHidden=True):
""" Find any lost/missing parameters in this cfg file, compared to what
the .cfgspc says should be there. This method is recommended by the
ConfigObj docs. Return a stringified list of item errors. """
# do some sanity checking, but don't (yet) make this a serious error
if not os.path.exists(config_file):
print("ERROR: Config file not found: "+config_file)
return []
if not os.path.exists(configspec_file):
print("ERROR: Configspec file not found: "+configspec_file)
return []
tmpObj = configobj.ConfigObj(config_file, configspec=configspec_file)
simval = configobj.SimpleVal()
test = tmpObj.validate(simval)
if test == True:
return []
# If we get here, there is a dict returned of {key1: bool, key2: bool}
# which matches the shape of the config obj. We need to walk it to
# find the Falses, since they are the missing pars.
missing = []
flattened = configobj.flatten_errors(tmpObj, test)
# But, before we move on, skip/eliminate any 'hidden' items from our list,
# since hidden items are really supposed to be missing from the .cfg file.
if len(flattened) > 0 and skipHidden:
keepers = []
for tup in flattened:
keep = True
# hidden section
if len(tup[0])>0 and isHiddenName(tup[0][-1]):
keep = False
# hidden par (in a section, or at the top level)
elif tup[1] is not None and isHiddenName(tup[1]):
keep = False
if keep:
keepers.append(tup)
flattened = keepers
flatStr = flattened2str(flattened, missing=True)
return flatStr
|
Return True if this string name denotes a hidden par or section
def isHiddenName(astr):
""" Return True if this string name denotes a hidden par or section """
if astr is not None and len(astr) > 2 and astr.startswith('_') and \
astr.endswith('_'):
return True
else:
return False
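For example:
>>> isHiddenName('_task_name_')
True
>>> isHiddenName('bitvalue')
False
>>> isHiddenName('_')
False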
|
Return a pretty-printed multi-line string version of the output of
flatten_errors. Know that flattened comes in the form of a list
of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
so we turn that into a string. Set missing to True if all the input
problems are from missing items. Set extra to True if all the input
problems are from extra items.
def flattened2str(flattened, missing=False, extra=False):
""" Return a pretty-printed multi-line string version of the output of
flatten_errors. Know that flattened comes in the form of a list
of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
so we turn that into a string. Set missing to True if all the input
problems are from missing items. Set extra to True if all the input
problems are from extra items. """
if flattened is None or len(flattened) < 1:
return ''
retval = ''
for sections, key, result in flattened:
# Name the section and item, to start the message line
if sections is None or len(sections) == 0:
retval += '\t"'+key+'"'
elif len(sections) == 1:
if key is None:
# a whole section is missing at the top-level; see if hidden
junk = sections[0]
if isHiddenName(junk):
continue # this missing or extra section is not an error
else:
retval += '\tSection "'+sections[0]+'"'
else:
retval += '\t"'+sections[0]+'.'+key+'"'
else: # len > 1
joined = '.'.join(sections)
joined = '"'+joined+'"'
if key is None:
retval += '\tSection '+joined
else:
retval += '\t"'+key+'" from '+joined
# End the msg line with "what seems to be the trouble" with this one
if missing and result==False:
retval += ' is missing.'
elif extra:
if result:
retval += ' is an unexpected section. Is your file out of date?'
else:
retval += ' is an unexpected parameter. Is your file out of date?'
elif isinstance(result, bool):
retval += ' has an invalid value'
else:
retval += ' is invalid, '+result.message
retval += '\n\n'
return retval.rstrip()
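For instance, feeding it a single flatten_errors-style tuple for a missing top-level item:
>>> flattened2str([([], 'bitvalue', False)], missing=True)
'\t"bitvalue" is missing.'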
|
Return name of file where we are expected to be saved if no files
for this task have ever been saved, and the user wishes to save. If
stub is True, the result will be <dir>/<taskname>_stub.cfg instead of
<dir>/<taskname>.cfg.
def getDefaultSaveFilename(self, stub=False):
""" Return name of file where we are expected to be saved if no files
for this task have ever been saved, and the user wishes to save. If
stub is True, the result will be <dir>/<taskname>_stub.cfg instead of
<dir>/<taskname>.cfg. """
if stub:
return self._rcDir+os.sep+self.__taskName+'_stub.cfg'
else:
return self._rcDir+os.sep+self.__taskName+'.cfg'
|
Set or reset the internal param list from the dict's contents.
def syncParamList(self, firstTime, preserve_order=True):
""" Set or reset the internal param list from the dict's contents. """
# See the note in setParam about this design.
# Get latest par values from dict. Make sure we do not
# change the id of the __paramList pointer here.
new_list = self._getParamsFromConfigDict(self, initialPass=firstTime)
# dumpCfgspcTo=sys.stdout)
# Have to add this odd last one for the sake of the GUI (still?)
if self._forUseWithEpar:
new_list.append(basicpar.IrafParS(['$nargs','s','h','N']))
if len(self.__paramList) > 0 and preserve_order:
# Here we have the most up-to-date data from the actual data
# model, the ConfigObj dict, and we need to use it to fill in
# our param list. BUT, we need to preserve the order our list
# has had up until now (by unique parameter name).
namesInOrder = [p.fullName() for p in self.__paramList]
assert len(namesInOrder) == len(new_list), \
'Mismatch in num pars, had: '+str(len(namesInOrder))+ \
', now we have: '+str(len(new_list))+', '+ \
str([p.fullName() for p in new_list])
self.__paramList[:] = [] # clear list, keep same pointer
# create a flat dict view of new_list, for ease of use in next step
new_list_dict = {} # can do in one step in v2.7
for par in new_list: new_list_dict[par.fullName()] = par
# populate
for fn in namesInOrder:
self.__paramList.append(new_list_dict[fn])
else:
# Here we just take the data in whatever order it came.
self.__paramList[:] = new_list
|
Return a par list just like ours, but with all default values.
def getDefaultParList(self):
""" Return a par list just like ours, but with all default values. """
# The code below (create a new set-to-dflts obj) is correct, but it
# adds a tenth of a second to startup. Clicking "Defaults" in the
# GUI does not call this. But this can be used to set the order seen.
# But first check for rare case of no cfg file name
if self.filename is None:
# this is a .cfgspc-only kind of object so far
self.filename = self.getDefaultSaveFilename(stub=True)
return copy.deepcopy(self.__paramList)
tmpObj = ConfigObjPars(self.filename, associatedPkg=self.__assocPkg,
setAllToDefaults=True, strict=False)
return tmpObj.getParList()
|
Find the ConfigObj entry. Update the __paramList.
def setParam(self, name, val, scope='', check=1, idxHint=None):
""" Find the ConfigObj entry. Update the __paramList. """
theDict, oldVal = findScopedPar(self, scope, name)
# Set the value, even if invalid. It needs to be set before
# the validation step (next).
theDict[name] = val
# If need be, check the proposed value. Ideally, we'd like to
# (somehow elegantly) only check this one item. For now, the best
# shortcut is to only validate this section.
if check:
ans=self.validate(self._vtor, preserve_errors=True, section=theDict)
if ans != True:
flatStr = "All values are invalid!"
if ans != False:
flatStr = flattened2str(configobj.flatten_errors(self, ans))
raise RuntimeError("Validation error: "+flatStr)
# Note - this design needs work. Right now there are two copies
# of the data: the ConfigObj dict, and the __paramList ...
# We rely on the idxHint arg so we don't have to search the __paramList
        # every time this is called, which could really slow things down.
assert idxHint is not None, "ConfigObjPars relies on a valid idxHint"
assert name == self.__paramList[idxHint].name, \
'Error in setParam, name: "'+name+'" != name at idxHint: "'+\
self.__paramList[idxHint].name+'", idxHint: '+str(idxHint)
self.__paramList[idxHint].set(val)
|
Write parameter data to filename (string or filehandle)
def saveParList(self, *args, **kw):
"""Write parameter data to filename (string or filehandle)"""
        filename = kw.get('filename')
if not filename:
filename = self.getFilename()
if not filename:
raise ValueError("No filename specified to save parameters")
if hasattr(filename,'write'):
fh = filename
absFileName = os.path.abspath(fh.name)
else:
absFileName = os.path.expanduser(filename)
absDir = os.path.dirname(absFileName)
if len(absDir) and not os.path.isdir(absDir): os.makedirs(absDir)
fh = open(absFileName,'w')
numpars = len(self.__paramList)
if self._forUseWithEpar: numpars -= 1
if not self.final_comment: self.final_comment = [''] # force \n at EOF
# Empty the ConfigObj version of section.defaults since that is based
# on an assumption incorrect for us, and override with our own list.
# THIS IS A BIT OF MONKEY-PATCHING! WATCH FUTURE VERSION CHANGES!
# See Trac ticket #762.
while len(self.defaults):
self.defaults.pop(-1) # empty it, keeping ref
for key in self._neverWrite:
self.defaults.append(key)
# Note also that we are only overwriting the top/main section's
# "defaults" list, but EVERY [sub-]section has such an attribute...
# Now write to file, delegating work to ConfigObj (note that ConfigObj
# write() skips any items listed by name in the self.defaults list)
self.write(fh)
fh.close()
retval = str(numpars) + " parameters written to " + absFileName
self.filename = absFileName # reset our own ConfigObj filename attr
self.debug('Keys not written: '+str(self.defaults))
return retval
|