text
string |
|---|
This may be overridden by a subclass.
def run(self, *args, **kw):
        """ Run this task by delegating to the run-function supplied by the
        hosting package.  This may be overridden by a subclass.

        Raises taskpars.NoExecError when no run function was supplied and
        the subclass has not overridden this method. """
        if self._runFunc is not None:
            # remove the two args sent by EditParDialog which we do not use;
            # pop with a default avoids the separate membership test
            kw.pop('mode', None)
            kw.pop('_save', None)
            return self._runFunc(self, *args, **kw)
        else:
            raise taskpars.NoExecError('No way to run task "'+self.__taskName+\
                '". You must either override the "run" method in your '+ \
                'ConfigObjPars subclass, or you must supply a "run" '+ \
                'function in your package.')
|
Print all the trigger logic to a string and return it.
def triggerLogicToStr(self):
        """ Serialize all the trigger/dependency/execute rule data to a
        single human-readable string and return it. """
        try:
            import json
        except ImportError:
            return "Cannot dump triggers/dependencies/executes (need json)"
        sections = ["TRIGGERS:\n" + json.dumps(self._allTriggers, indent=3),
                    "\nDEPENDENCIES:\n" + json.dumps(self._allDepdcs, indent=3),
                    "\nTO EXECUTE:\n" + json.dumps(self._allExecutes, indent=3),
                    "\n"]
        return ''.join(sections)
|
Given a config file, find its associated config-spec file, and
return the full pathname of the file.
def _findAssociatedConfigSpecFile(self, cfgFileName):
        """ Given a config file, find its associated config-spec file, and
        return the full pathname of the file.

        Search order: current dir, the .cfg file's own dir, the resource
        dir, the associated package (if any), then the package matching
        the task name.  Raises NoCfgFileError if nothing is found. """
        # Handle simplest 2 cases first: co-located or local .cfgspc file
        retval = "."+os.sep+self.__taskName+".cfgspc"
        if os.path.isfile(retval): return retval
        retval = os.path.dirname(cfgFileName)+os.sep+self.__taskName+".cfgspc"
        if os.path.isfile(retval): return retval
        # Also try the resource dir
        retval = self.getDefaultSaveFilename()+'spc' # .cfgspc
        if os.path.isfile(retval): return retval
        # Now try and see if there is a matching .cfgspc file in/under an
        # associated package, if one is defined.
        # NOTE(review): unlike the task-name lookup below, this branch
        # returns theFile without an existence check - presumably
        # findCfgFileForPkg raises on failure here; confirm.
        if self.__assocPkg is not None:
            x, theFile = findCfgFileForPkg(None, '.cfgspc',
                                           pkgObj = self.__assocPkg,
                                           taskName = self.__taskName)
            return theFile
        # Finally try to import the task name and see if there is a .cfgspc
        # file in that directory
        x, theFile = findCfgFileForPkg(self.__taskName, '.cfgspc',
                                       taskName = self.__taskName)
        if os.path.exists(theFile):
            return theFile
        # unfound
        raise NoCfgFileError('Unfound config-spec file for task: "'+ \
                             self.__taskName+'"')
|
Walk the given ConfigObj dict pulling out IRAF-like parameters into
a list. Since this operates on a dict this can be called recursively.
This is also our chance to find and pull out triggers and such
dependencies.
def _getParamsFromConfigDict(self, cfgObj, scopePrefix='',
                             initialPass=False, dumpCfgspcTo=None):
        """ Walk the given ConfigObj dict pulling out IRAF-like parameters into
        a list. Since this operates on a dict this can be called recursively.
        This is also our chance to find and pull out triggers and such
        dependencies.

        cfgObj       - the ConfigObj dict (or sub-section) to walk
        scopePrefix  - dotted scope accumulated during recursion ('' at top)
        initialPass  - True only on the first full walk; trigger/dependency/
                       execute data is collected ONLY then
        dumpCfgspcTo - optional open file object; when given, a .cfgspc-style
                       dump of each section/par is written to it (dev aid)

        Returns a list of basicpar parameter objects. """
        # init
        retval = []
        if initialPass and len(scopePrefix) < 1:
            self._posArgs = [] # positional args [2-tuples]: (index,scopedName)
            # FOR SECURITY: the following 3 chunks of data,
            # _allTriggers, _allDepdcs, _allExecutes,
            # are collected ONLY from the .cfgspc file
            self._allTriggers = {}
            self._allDepdcs = {}
            self._allExecutes = {}
        # start walking ("tell yer story walkin, buddy")
        # NOTE: this relies on the "in" operator returning keys in the
        # order that they exist in the dict (which depends on ConfigObj keeping
        # the order they were found in the original file)
        for key in cfgObj:
            val = cfgObj[key]
            # Do we need to skip this - if not a par, like a rule or something
            toBeHidden = isHiddenName(key)
            if toBeHidden:
                if key not in self._neverWrite and key != TASK_NAME_KEY:
                    self._neverWrite.append(key)
                    # yes TASK_NAME_KEY is hidden, but it IS output to the .cfg
            # a section
            if isinstance(val, dict):
                if not toBeHidden:
                    if len(list(val.keys()))>0 and len(retval)>0:
                        # Here is where we sneak in the section comment
                        # This is so incredibly kludgy (as the code was), it
                        # MUST be revamped eventually! This is for the epar GUI.
                        prevPar = retval[-1]
                        # Use the key (or its comment?) as the section header
                        prevPar.set(prevPar.get('p_prompt')+'\n\n'+key,
                                    field='p_prompt', check=0)
                    if dumpCfgspcTo:
                        dumpCfgspcTo.write('\n['+key+']\n')
                    # a logical grouping (append its params)
                    pfx = scopePrefix+'.'+key
                    pfx = pfx.strip('.')
                    retval = retval + self._getParamsFromConfigDict(val, pfx,
                                      initialPass, dumpCfgspcTo) # recurse
            else:
                # a param
                fields = []
                choicesOrMin = None
                fields.append(key) # name
                dtype = 's'  # default to string type (IRAF type code)
                cspc = None
                if cfgObj.configspec:
                    cspc = cfgObj.configspec.get(key) # None if not found
                chk_func_name = ''
                chk_args_dict = {}
                if cspc:
                    # e.g. "integer_kw(...)" -> "integer_kw"
                    chk_func_name = cspc[:cspc.find('(')]
                    chk_args_dict = vtor_checks.sigStrToKwArgsDict(cspc)
                if chk_func_name.find('option') >= 0:
                    dtype = 's'
                    # convert the choices string to a list (to weed out kwds)
                    x = cspc[cspc.find('(')+1:-1] # just the options() args
                    # cspc e.g.: option_kw("poly5","nearest","linear", default="poly5", comment="Interpolant (poly5,nearest,linear)")
                    x = x.split(',') # tokenize
                    # but! comment value may have commas in it, find it
                    # using its equal sign, rm all after it
                    has_eq = [i for i in x if i.find('=')>=0]
                    if len(has_eq) > 0:
                        x = x[: x.index(has_eq[0]) ]
                    # rm spaces, extra quotes; rm kywd arg pairs
                    x = [i.strip("' ") for i in x if i.find('=')<0]
                    choicesOrMin = '|'+'|'.join(x)+'|' # IRAF format for enums
                elif chk_func_name.find('boolean') >= 0: dtype = 'b'
                elif chk_func_name.find('float_or_') >= 0: dtype = 'r'
                elif chk_func_name.find('float') >= 0: dtype = 'R'
                elif chk_func_name.find('integer_or_') >= 0: dtype = 'i'
                elif chk_func_name.find('integer') >= 0: dtype = 'I'
                elif chk_func_name.find('action') >= 0: dtype = 'z'
                fields.append(dtype)
                fields.append('a')  # mode: automatic
                if type(val)==bool:
                    # IRAF-style yes/no for booleans
                    if val: fields.append('yes')
                    else: fields.append('no')
                else:
                    fields.append(val)
                fields.append(choicesOrMin)
                fields.append(None)
                # Primarily use description from .cfgspc file (0). But, allow
                # overrides from .cfg file (1) if different.
                dscrp0 = chk_args_dict.get('comment','').strip() # ok if missing
                dscrp1 = cfgObj.inline_comments[key]
                if dscrp1 is None:
                    dscrp1 = ''
                while len(dscrp1) > 0 and dscrp1[0] in (' ','#'):
                    dscrp1 = dscrp1[1:] # .cfg file comments start with '#'
                dscrp1 = dscrp1.strip()
                # Now, decide what to do/say about the descriptions
                if len(dscrp1) > 0:
                    dscrp = dscrp0
                    if dscrp0 != dscrp1: # allow override if different
                        dscrp = dscrp1+eparoption.DSCRPTN_FLAG # flag it
                        if initialPass:
                            if dscrp0 == '' and cspc is None:
                                # this is a case where this par isn't in the
                                # .cfgspc; ignore, it is caught/error later
                                pass
                            else:
                                self.debug('Description of "'+key+ \
                                    '" overridden, from:  '+repr(dscrp0)+\
                                    '  to:  '+repr(dscrp1))
                    fields.append(dscrp)
                else:
                    # set the field for the GUI
                    fields.append(dscrp0)
                    # ALSO set it in the dict so it is written to file later
                    cfgObj.inline_comments[key] = '# '+dscrp0
                # This little section, while never intended to be used during
                # normal operation, could save a lot of manual work.
                if dumpCfgspcTo:
                    junk = cspc
                    junk = key+' = '+junk.strip()
                    if junk.find(' comment=')<0:
                        junk = junk[:-1]+", comment="+ \
                               repr(irafutils.stripQuotes(dscrp1.strip()))+")"
                    dumpCfgspcTo.write(junk+'\n')
                # Create the par
                if not toBeHidden or chk_func_name.find('action')==0:
                    par = basicpar.parFactory(fields, True)
                    par.setScope(scopePrefix)
                    retval.append(par)
                # else this is a hidden key
                # The next few items require a fully scoped name
                absKeyName = scopePrefix+'.'+key # assumed to be unique
                # Check for pars marked to be positional args
                if initialPass:
                    pos = chk_args_dict.get('pos')
                    if pos:
                        # we'll sort them later, on demand
                        self._posArgs.append( (int(pos), scopePrefix, key) )
                # Check for triggers and/or dependencies
                if initialPass:
                    # What triggers what? (that's why there's an 's' in the kwd)
                    # try "trigger" (old)
                    if chk_args_dict.get('trigger'):
                        print("WARNING: outdated version of .cfgspc!! for "+
                              self.__taskName+", 'trigger' unused for "+
                              absKeyName)
                    # try "triggers"
                    trgs = chk_args_dict.get('triggers')
                    if trgs and len(trgs)>0:
                        # eg. _allTriggers['STEP2.xy'] == ('_rule1_','_rule3_')
                        assert absKeyName not in self._allTriggers, \
                            'More than 1 of these in .cfgspc?: '+absKeyName
                        # we force this to always be a sequence
                        if isinstance(trgs, (list,tuple)):
                            self._allTriggers[absKeyName] = trgs
                        else:
                            self._allTriggers[absKeyName] = (trgs,)
                    # try "executes"
                    excs = chk_args_dict.get('executes')
                    if excs and len(excs)>0:
                        # eg. _allExecutes['STEP2.xy'] == ('_rule1_','_rule3_')
                        assert absKeyName not in self._allExecutes, \
                            'More than 1 of these in .cfgspc?: '+absKeyName
                        # we force this to always be a sequence
                        if isinstance(excs, (list,tuple)):
                            self._allExecutes[absKeyName] = excs
                        else:
                            self._allExecutes[absKeyName] = (excs,)
                    # Dependencies? (besides these used here, may someday
                    # add: 'range_from', 'warn_if', etc.)
                    depName = None
                    if not depName:
                        depType = 'active_if'
                        depName = chk_args_dict.get(depType) # e.g. =='_rule1_'
                    if not depName:
                        depType = 'inactive_if'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'is_set_by'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'set_yes_if'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'set_no_if'
                        depName = chk_args_dict.get(depType)
                    if not depName:
                        depType = 'is_disabled_by'
                        depName = chk_args_dict.get(depType)
                    # NOTE - the above few lines stops at the first dependency
                    # found (depName) for a given par. If, in the future a
                    # given par can have >1 dependency than we need to revamp!!
                    if depName:
                        # Add to _allDepdcs dict: (val is dict of pars:types)
                        #
                        # e.g. _allDepdcs['_rule1_'] == \
                        #        {'STEP3.ra':      'active_if',
                        #         'STEP3.dec':     'active_if',
                        #         'STEP3.azimuth': 'inactive_if'}
                        if depName in self._allDepdcs:
                            thisRulesDict = self._allDepdcs[depName]
                            assert not absKeyName in thisRulesDict, \
                                'Cant yet handle multiple actions for the '+ \
                                'same par and the same rule. For "'+depName+ \
                                '" dict was: '+str(thisRulesDict)+ \
                                ' while trying to add to it: '+\
                                str({absKeyName:depType})
                            thisRulesDict[absKeyName] = depType
                        else:
                            self._allDepdcs[depName] = {absKeyName:depType}
                    # else no dependencies found for this chk_args_dict
        return retval
|
For a given item (scope + name), return all strings (in a tuple)
that it is meant to trigger, if any exist. Returns None if none.
def getTriggerStrings(self, parScope, parName):
        """ For a given item (scope + name), return the tuple of strings it
        is meant to trigger, or None if it has none. """
        # _allTriggers is keyed by the fully-scoped name precisely so that
        # this access is a single dict lookup.
        return self._allTriggers.get(parScope + '.' + parName)
|
For a given item (scope + name), return all strings (in a tuple)
that it is meant to execute, if any exist. Returns None if none.
def getExecuteStrings(self, parScope, parName):
        """ For a given item (scope + name), return the tuple of strings it
        is meant to execute, or None if it has none. """
        # _allExecutes is keyed by the fully-scoped name precisely so that
        # this access is a single dict lookup.
        return self._allExecutes.get(parScope + '.' + parName)
|
Return a list, in order, of any parameters marked with "pos=N" in
the .cfgspc file.
def getPosArgs(self):
        """ Return the values, in positional order, of any parameters marked
        with "pos=N" in the .cfgspc file. """
        if not self._posArgs:
            return []
        # Each entry is (index, scope, name); the in-place sort orders by
        # the index first (kept in-place to match prior behavior).
        self._posArgs.sort()
        return [findScopedPar(self, scope, name)[1]
                for idx, scope, name in self._posArgs]
|
Return a dict of all normal dict parameters - that is, all
parameters NOT marked with "pos=N" in the .cfgspc file. This will
also exclude all hidden parameters (metadata, rules, etc).
def getKwdArgs(self, flatten = False):
        """ Return a dict of all normal dict parameters - that is, all
        parameters NOT marked with "pos=N" in the .cfgspc file. This will
        also exclude all hidden parameters (metadata, rules, etc).

        flatten - when True, bring all parameters up to the top level even
                  if they live in sub-sections (may raise on name collision)
        """
        # Start with a full deep-copy. What complicates this method is the
        # idea of sub-sections. This dict can have dicts as values, and so on.
        dcopy = self.dict() # ConfigObj docs say this is a deep-copy
        # First go through the dict removing all positional args
        for idx,scope,name in self._posArgs:
            theDict, val = findScopedPar(dcopy, scope, name)
            # 'theDict' may be dcopy, or it may be a dict under it
            theDict.pop(name)
        # Then go through the dict removing all hidden items ('_item_name_')
        # (iterate over a list copy of the keys since we mutate dcopy)
        for k in list(dcopy.keys()):
            if isHiddenName(k):
                dcopy.pop(k)
        # Done with the nominal operation
        if not flatten:
            return dcopy
        # They have asked us to flatten the structure - to bring all parameters
        # up to the top level, even if they are in sub-sections. So we look
        # for values that are dicts. We will throw something if we end up
        # with name collisions at the top level as a result of this.
        return flattenDictTree(dcopy)
|
For the given item name (and scope), we are being asked to try
the given value to see if it would pass validation. We are not
to set it, but just try it. We return a tuple:
If it fails, we return: (False, the last known valid value).
On success, we return: (True, None).
def tryValue(self, name, val, scope=''):
        """ For the given item name (and scope), we are being asked to try
        the given value to see if it would pass validation. We are not
        to set it, but just try it. We return a tuple:
        If it fails, we return: (False, the last known valid value).
        On success, we return: (True, None). """
        # SIMILARITY BETWEEN THIS AND setParam() SHOULD BE CONSOLIDATED!
        # Set the value, even if invalid. It needs to be set before
        # the validation step (next).
        theDict, oldVal = findScopedPar(self, scope, name)
        if oldVal == val: return (True, None) # assume oldVal is valid
        theDict[name] = val
        # Check the proposed value. Ideally, we'd like to
        # (somehow elegantly) only check this one item. For now, the best
        # shortcut is to only validate this section.
        ans=self.validate(self._vtor, preserve_errors=True, section=theDict)
        # No matter what ans is, immediately return the item to its original
        # value since we are only checking the value here - not setting.
        theDict[name] = oldVal
        # Now see what the validation check said
        # (ans may be True, False, or a dict of per-item results, so the
        # inequality comparisons below are deliberate - not identity tests)
        errStr = ''
        if ans != True:
            flatStr = "All values are invalid!"
            if ans != False:
                flatStr = flattened2str(configobj.flatten_errors(self, ans))
            errStr = "Validation error: "+flatStr # for now this info is unused
        # Done
        if len(errStr): return (False, oldVal) # was an error
        else: return (True, None)
|
Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items.
def listTheExtras(self, deleteAlso):
        """ Use ConfigObj's get_extra_values() call to find any extra/unknown
        parameters we may have loaded. Return a string similar to findTheLost.
        If deleteAlso is True, this will also delete any extra/unknown items.
        """
        # get list of extras
        extras = configobj.get_extra_values(self)
        # extras is in format: [(sections, key), (sections, key), ]
        # but we need: [(sections, key, result), ...] - set all results to
        # a bool just to make it the right shape. BUT, since we are in
        # here anyway, make that bool mean something - hide info in it about
        # whether that extra item is a section (1) or just a single par (0)
        #
        # simplified, this is: expanded = [ (x+(abool,)) for x in extras]
        # (the bool is True only for a top-level item that is itself a dict)
        expanded = [ (x+ \
                      ( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \
                     ) for x in extras]
        retval = ''
        if expanded:
            retval = flattened2str(expanded, extra=1)
        # but before we return, delete them (from ourself!) if requested to
        if deleteAlso:
            for tup_to_del in extras:
                target = self
                # descend the tree to the dict where this items is located.
                # (this works because target is not a copy (because the dict
                # type is mutable))
                location = tup_to_del[0]
                for subdict in location: target = target[subdict]
                # delete it
                target.pop(tup_to_del[1])
        return retval
|
Reloads the measurements from the backing store.
:return: 200 if success.
def get(self):
        """
        Reloads the measurements from the backing store.
        :return: 200 if success, 500 (with the exception info) on failure.
        """
        try:
            self._measurementController.reloadCompletedMeasurements()
            return None, 200
        except Exception:
            # narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate
            logger.exception("Failed to reload measurements")
            return str(sys.exc_info()), 500
|
Writes each sample to a csv file.
:param data: the samples.
:return:
def handle(self, data):
        """
        Writes each sample to a csv file.
        :param data: the samples.
        :return:
        """
        self.logger.debug("Handling " + str(len(data)) + " data items")
        for item in data:
            if isinstance(item, dict):
                # these have to wrapped in a list for python 3.4 due to a
                # change in the implementation of OrderedDict in python 3.5+
                # (where .keys() and .values() became sequences)
                if self._first:
                    # header row comes from the first dict's keys
                    self._csv.writerow(list(item.keys()))
                    self._first = False
                self._csv.writerow(list(item.values()))
                continue
            if isinstance(item, list):
                self._csv.writerow(item)
                continue
            self.logger.warning("Ignoring unsupported data type " + str(type(item)) + " : " + str(item))
|
Posts to the target to tell it a named measurement is starting.
:param measurementId:
def start(self, measurementId):
        """
        Posts to the target to tell it a named measurement is starting.
        :param measurementId: the measurement that is starting.
        """
        # remember the URL; subsequent data/stop calls build on it
        url = self.rootURL + measurementId + '/' + self.deviceName
        self.sendURL = url
        self.startResponseCode = self._doPut(url)
|
puts the data in the target.
:param data: the data to post.
:return:
def handle(self, data):
        """
        puts the data in the target.
        :param data: the data to post.
        :return:
        """
        # each put's response code is accumulated, one per data batch
        response = self._doPut(self.sendURL + '/data', data=data)
        self.dataResponseCode.append(response)
|
informs the target the named measurement has completed
:param measurementId: the measurement that has completed.
:return:
def stop(self, measurementId, failureReason=None):
        """
        informs the target the named measurement has completed
        :param measurementId: the measurement that has completed.
        :param failureReason: optional description of why it failed.
        """
        if failureReason is not None:
            self.endResponseCode = self._doPut(self.sendURL + "/failed",
                                               data={'failureReason': failureReason})
        else:
            self.endResponseCode = self._doPut(self.sendURL + "/complete")
        # clear the URL so stale sends are impossible after completion
        self.sendURL = None
|
Convert decimal dotted quad string to long integer
>>> int(dottedQuadToNum('1 '))
1
>>> int(dottedQuadToNum(' 1.2'))
16777218
>>> int(dottedQuadToNum(' 1.2.3 '))
16908291
>>> int(dottedQuadToNum('1.2.3.4'))
16909060
>>> dottedQuadToNum('255.255.255.255')
4294967295
>>> dottedQuadToNum('255.255.255.256')
Traceback (most recent call last):
ValueError: Not a good dotted-quad IP: 255.255.255.256
def dottedQuadToNum(ip):
    """
    Convert decimal dotted quad string to long integer
    >>> int(dottedQuadToNum('1 '))
    1
    >>> int(dottedQuadToNum(' 1.2'))
    16777218
    >>> int(dottedQuadToNum(' 1.2.3 '))
    16908291
    >>> int(dottedQuadToNum('1.2.3.4'))
    16909060
    >>> dottedQuadToNum('255.255.255.255')
    4294967295
    >>> dottedQuadToNum('255.255.255.256')
    Traceback (most recent call last):
    ValueError: Not a good dotted-quad IP: 255.255.255.256
    """
    # import here to avoid it when ip_addr values are not used
    import socket, struct
    try:
        # inet_aton parses (and validates) the dotted quad; unpack as a
        # network-order unsigned 32-bit int
        return struct.unpack('!L',
            socket.inet_aton(ip.strip()))[0]
    except socket.error:
        raise ValueError('Not a good dotted-quad IP: %s' % ip)
    # (removed an unreachable bare "return" that followed the try/except -
    # both branches already leave the function)
|
Convert long int to dotted quad string
>>> numToDottedQuad(long(-1))
Traceback (most recent call last):
ValueError: Not a good numeric IP: -1
>>> numToDottedQuad(long(1))
'0.0.0.1'
>>> numToDottedQuad(long(16777218))
'1.0.0.2'
>>> numToDottedQuad(long(16908291))
'1.2.0.3'
>>> numToDottedQuad(long(16909060))
'1.2.3.4'
>>> numToDottedQuad(long(4294967295))
'255.255.255.255'
>>> numToDottedQuad(long(4294967296))
Traceback (most recent call last):
ValueError: Not a good numeric IP: 4294967296
def numToDottedQuad(num):
    """
    Convert int (or long) to dotted quad string
    >>> numToDottedQuad(-1)
    Traceback (most recent call last):
    ValueError: Not a good numeric IP: -1
    >>> numToDottedQuad(1)
    '0.0.0.1'
    >>> numToDottedQuad(16777218)
    '1.0.0.2'
    >>> numToDottedQuad(16908291)
    '1.2.0.3'
    >>> numToDottedQuad(16909060)
    '1.2.3.4'
    >>> numToDottedQuad(4294967295)
    '255.255.255.255'
    >>> numToDottedQuad(4294967296)
    Traceback (most recent call last):
    ValueError: Not a good numeric IP: 4294967296
    """
    # import here to avoid it when ip_addr values are not used
    import socket, struct
    # no need to intercept here, 4294967295 is fine
    if num > 4294967295 or num < 0:
        raise ValueError('Not a good numeric IP: %s' % num)
    try:
        # int() replaces the Python-2-only long() call (NameError on py3);
        # on py2 an oversized int auto-promotes, so behavior is unchanged
        return socket.inet_ntoa(
            struct.pack('!L', int(num)))
    except (socket.error, struct.error, OverflowError):
        raise ValueError('Not a good numeric IP: %s' % num)
|
Return numbers from inputs or raise VdtParamError.
Lets ``None`` pass through.
Pass in keyword argument ``to_float=True`` to
use float for the conversion rather than int.
>>> _is_num_param(('', ''), (0, 1.0))
[0, 1]
>>> _is_num_param(('', ''), (0, 1.0), to_float=True)
[0.0, 1.0]
>>> _is_num_param(('a'), ('a')) # doctest: +SKIP
Traceback (most recent call last):
VdtParamError: passed an incorrect value "a" for parameter "a".
def _is_num_param(names, values, to_float=False):
"""
Return numbers from inputs or raise VdtParamError.
Lets ``None`` pass through.
Pass in keyword argument ``to_float=True`` to
use float for the conversion rather than int.
>>> _is_num_param(('', ''), (0, 1.0))
[0, 1]
>>> _is_num_param(('', ''), (0, 1.0), to_float=True)
[0.0, 1.0]
>>> _is_num_param(('a'), ('a')) # doctest: +SKIP
Traceback (most recent call last):
VdtParamError: passed an incorrect value "a" for parameter "a".
"""
fun = to_float and float or int
out_params = []
for (name, val) in zip(names, values):
if val is None:
out_params.append(val)
elif isinstance(val, number_or_string_types):
try:
out_params.append(fun(val))
except ValueError:
raise VdtParamError(name, val)
else:
raise VdtParamError(name, val)
return out_params
|
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
def is_integer(value, min=None, max=None):
    """
    A check that tests that a given value is an integer (int, or long)
    and optionally, between bounds. A negative value is accepted, while
    a float will fail.
    If the value is a string, then the conversion is done - if possible.
    Otherwise a VdtError is raised.
    >>> vtor = Validator()
    >>> vtor.check('integer', '-1')
    -1
    >>> vtor.check('integer', '0')
    0
    >>> vtor.check('integer', 9)
    9
    >>> vtor.check('integer', 'a') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    >>> vtor.check('integer', '2.2') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "2.2" is of the wrong type.
    >>> vtor.check('integer(10)', '20')
    20
    >>> vtor.check('integer(max=20)', '15')
    15
    >>> vtor.check('integer(10)', '9') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooSmallError: the value "9" is too small.
    >>> vtor.check('integer(10)', 9) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooSmallError: the value "9" is too small.
    >>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooBigError: the value "35" is too big.
    >>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooBigError: the value "35" is too big.
    >>> vtor.check('integer(0, 9)', False)
    0
    """
    # normalize/validate the bounds first
    lower, upper = _is_num_param(('min', 'max'), (min, max))
    if not isinstance(value, int_or_string_types):
        raise VdtTypeError(value)
    if isinstance(value, string_types):
        # string input: does it represent an integer?
        try:
            value = int(value)
        except ValueError:
            raise VdtTypeError(value)
    if lower is not None and value < lower:
        raise VdtValueTooSmallError(value)
    if upper is not None and value > upper:
        raise VdtValueTooBigError(value)
    return value
|
A check that tests that a given value is a float
(an integer will be accepted), and optionally - that it is between bounds.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
This can accept negative values.
>>> vtor = Validator()
>>> vtor.check('float', '2')
2.0
From now on we multiply the value to avoid comparing decimals
>>> vtor.check('float', '-6.8') * 10
-68.0
>>> vtor.check('float', '12.2') * 10
122.0
>>> vtor.check('float', 8.4) * 10
84.0
>>> vtor.check('float', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('float(10.1)', '10.2') * 10
102.0
>>> vtor.check('float(max=20.2)', '15.1') * 10
151.0
>>> vtor.check('float(10.0)', '9.0') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9.0" is too small.
>>> vtor.check('float(max=20.0)', '35.0') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35.0" is too big.
def is_float(value, min=None, max=None):
    """
    A check that tests that a given value is a float
    (an integer will be accepted), and optionally - that it is between bounds.
    If the value is a string, then the conversion is done - if possible.
    Otherwise a VdtError is raised.
    This can accept negative values.
    >>> vtor = Validator()
    >>> vtor.check('float', '2')
    2.0
    From now on we multiply the value to avoid comparing decimals
    >>> vtor.check('float', '-6.8') * 10
    -68.0
    >>> vtor.check('float', '12.2') * 10
    122.0
    >>> vtor.check('float', 8.4) * 10
    84.0
    >>> vtor.check('float', 'a') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    >>> vtor.check('float(10.1)', '10.2') * 10
    102.0
    >>> vtor.check('float(max=20.2)', '15.1') * 10
    151.0
    >>> vtor.check('float(10.0)', '9.0') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooSmallError: the value "9.0" is too small.
    >>> vtor.check('float(max=20.0)', '35.0') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooBigError: the value "35.0" is too big.
    """
    # normalize/validate the bounds first (as floats)
    lower, upper = _is_num_param(
        ('min', 'max'), (min, max), to_float=True)
    if not isinstance(value, number_or_string_types):
        raise VdtTypeError(value)
    if not isinstance(value, float):
        # string or int input: does it represent a float?
        try:
            value = float(value)
        except ValueError:
            raise VdtTypeError(value)
    if lower is not None and value < lower:
        raise VdtValueTooSmallError(value)
    if upper is not None and value > upper:
        raise VdtValueTooBigError(value)
    return value
|
Check if the value represents a boolean.
>>> vtor = Validator()
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
def is_boolean(value):
    """
    Check if the value represents a boolean.
    >>> vtor = Validator()
    >>> vtor.check('boolean', 0)
    0
    >>> vtor.check('boolean', False)
    0
    >>> vtor.check('boolean', '0')
    0
    >>> vtor.check('boolean', 'off')
    0
    >>> vtor.check('boolean', 'false')
    0
    >>> vtor.check('boolean', 'no')
    0
    >>> vtor.check('boolean', 'nO')
    0
    >>> vtor.check('boolean', 'NO')
    0
    >>> vtor.check('boolean', 1)
    1
    >>> vtor.check('boolean', True)
    1
    >>> vtor.check('boolean', '1')
    1
    >>> vtor.check('boolean', 'on')
    1
    >>> vtor.check('boolean', 'true')
    1
    >>> vtor.check('boolean', 'yes')
    1
    >>> vtor.check('boolean', 'Yes')
    1
    >>> vtor.check('boolean', 'YES')
    1
    >>> vtor.check('boolean', '') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "" is of the wrong type.
    >>> vtor.check('boolean', 'up') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "up" is of the wrong type.
    """
    if isinstance(value, string_types):
        # map recognised strings via the shared lookup table
        key = value.lower()
        if key not in bool_dict:
            raise VdtTypeError(value)
        return bool_dict[key]
    # equality (not identity) comparison: lets 0 and 1 stand in for
    # False and True (and keeps Python 2.2 compatibility)
    if value == True:
        return True
    if value == False:
        return False
    raise VdtTypeError(value)
|
Check that the supplied value is an Internet Protocol address, v.4,
represented by a dotted-quad string, i.e. '1.2.3.4'.
>>> vtor = Validator()
>>> vtor.check('ip_addr', '1 ')
'1'
>>> vtor.check('ip_addr', ' 1.2')
'1.2'
>>> vtor.check('ip_addr', ' 1.2.3 ')
'1.2.3'
>>> vtor.check('ip_addr', '1.2.3.4')
'1.2.3.4'
>>> vtor.check('ip_addr', '0.0.0.0')
'0.0.0.0'
>>> vtor.check('ip_addr', '255.255.255.255')
'255.255.255.255'
>>> vtor.check('ip_addr', '255.255.255.256') # doctest: +SKIP
Traceback (most recent call last):
VdtValueError: the value "255.255.255.256" is unacceptable.
>>> vtor.check('ip_addr', '1.2.3.4.5') # doctest: +SKIP
Traceback (most recent call last):
VdtValueError: the value "1.2.3.4.5" is unacceptable.
>>> vtor.check('ip_addr', 0) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
def is_ip_addr(value):
    """
    Check that the supplied value is an Internet Protocol address, v.4,
    represented by a dotted-quad string, i.e. '1.2.3.4'.
    >>> vtor = Validator()
    >>> vtor.check('ip_addr', '1 ')
    '1'
    >>> vtor.check('ip_addr', ' 1.2')
    '1.2'
    >>> vtor.check('ip_addr', ' 1.2.3 ')
    '1.2.3'
    >>> vtor.check('ip_addr', '1.2.3.4')
    '1.2.3.4'
    >>> vtor.check('ip_addr', '0.0.0.0')
    '0.0.0.0'
    >>> vtor.check('ip_addr', '255.255.255.255')
    '255.255.255.255'
    >>> vtor.check('ip_addr', '255.255.255.256') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueError: the value "255.255.255.256" is unacceptable.
    >>> vtor.check('ip_addr', '1.2.3.4.5') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueError: the value "1.2.3.4.5" is unacceptable.
    >>> vtor.check('ip_addr', 0) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    """
    if not isinstance(value, string_types):
        raise VdtTypeError(value)
    stripped = value.strip()
    # delegate the actual parsing; a parse failure means a bad address
    try:
        dottedQuadToNum(stripped)
    except ValueError:
        raise VdtValueError(stripped)
    return stripped
|
Check that the value is a list of values.
You can optionally specify the minimum and maximum number of members.
It does no check on list members.
>>> vtor = Validator()
>>> vtor.check('list', ())
[]
>>> vtor.check('list', [])
[]
>>> vtor.check('list', (1, 2))
[1, 2]
>>> vtor.check('list', [1, 2])
[1, 2]
>>> vtor.check('list(3)', (1, 2)) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) # doctest: +SKIP
[1, 2, 3, 4]
>>> vtor.check('list', 0) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('list', '12') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
def is_list(value, min=None, max=None):
    """
    Check that the value is a list of values.
    You can optionally specify the minimum and maximum number of members.
    It does no check on list members.
    >>> vtor = Validator()
    >>> vtor.check('list', ())
    []
    >>> vtor.check('list', [])
    []
    >>> vtor.check('list', (1, 2))
    [1, 2]
    >>> vtor.check('list', [1, 2])
    [1, 2]
    >>> vtor.check('list(3)', (1, 2)) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooShortError: the value "(1, 2)" is too short.
    >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
    >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) # doctest: +SKIP
    [1, 2, 3, 4]
    >>> vtor.check('list', 0) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    >>> vtor.check('list', '12') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "12" is of the wrong type.
    """
    # Validate the bound parameters first so a bad check string is reported
    # before any problem with the value itself.
    min_len, max_len = _is_num_param(('min', 'max'), (min, max))
    # Strings are sized, but are never accepted as lists.
    if isinstance(value, string_types):
        raise VdtTypeError(value)
    try:
        count = len(value)
    except TypeError:
        raise VdtTypeError(value)
    if min_len is not None and count < min_len:
        raise VdtValueTooShortError(value)
    if max_len is not None and count > max_len:
        raise VdtValueTooLongError(value)
    return list(value)
|
Check that the value is a list of integers.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an integer.
>>> vtor = Validator()
>>> vtor.check('int_list', ())
[]
>>> vtor.check('int_list', [])
[]
>>> vtor.check('int_list', (1, 2))
[1, 2]
>>> vtor.check('int_list', [1, 2])
[1, 2]
>>> vtor.check('int_list', [1, 'a']) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
def is_int_list(value, min=None, max=None):
    """
    Check that the value is a list of integers.
    You can optionally specify the minimum and maximum number of members.
    Each list member is checked that it is an integer.
    >>> vtor = Validator()
    >>> vtor.check('int_list', ())
    []
    >>> vtor.check('int_list', [])
    []
    >>> vtor.check('int_list', (1, 2))
    [1, 2]
    >>> vtor.check('int_list', [1, 2])
    [1, 2]
    >>> vtor.check('int_list', [1, 'a']) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    """
    # Delegate the container check to is_list, then coerce each member.
    return list(map(is_integer, is_list(value, min, max)))
|
Check that the value is a list of booleans.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a boolean.
>>> vtor = Validator()
>>> vtor.check('bool_list', ())
[]
>>> vtor.check('bool_list', [])
[]
>>> check_res = vtor.check('bool_list', (True, False))
>>> check_res == [True, False]
1
>>> check_res = vtor.check('bool_list', [True, False])
>>> check_res == [True, False]
1
>>> vtor.check('bool_list', [True, 'a']) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
def is_bool_list(value, min=None, max=None):
    """
    Check that the value is a list of booleans.
    You can optionally specify the minimum and maximum number of members.
    Each list member is checked that it is a boolean.
    >>> vtor = Validator()
    >>> vtor.check('bool_list', ())
    []
    >>> vtor.check('bool_list', [])
    []
    >>> check_res = vtor.check('bool_list', (True, False))
    >>> check_res == [True, False]
    1
    >>> check_res = vtor.check('bool_list', [True, False])
    >>> check_res == [True, False]
    1
    >>> vtor.check('bool_list', [True, 'a']) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    """
    # Delegate the container check to is_list, then coerce each member.
    return list(map(is_boolean, is_list(value, min, max)))
|
Check that the value is a list of floats.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a float.
>>> vtor = Validator()
>>> vtor.check('float_list', ())
[]
>>> vtor.check('float_list', [])
[]
>>> vtor.check('float_list', (1, 2.0))
[1.0, 2.0]
>>> vtor.check('float_list', [1, 2.0])
[1.0, 2.0]
>>> vtor.check('float_list', [1, 'a']) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
def is_float_list(value, min=None, max=None):
    """
    Check that the value is a list of floats.
    You can optionally specify the minimum and maximum number of members.
    Each list member is checked that it is a float.
    >>> vtor = Validator()
    >>> vtor.check('float_list', ())
    []
    >>> vtor.check('float_list', [])
    []
    >>> vtor.check('float_list', (1, 2.0))
    [1.0, 2.0]
    >>> vtor.check('float_list', [1, 2.0])
    [1.0, 2.0]
    >>> vtor.check('float_list', [1, 'a']) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    """
    # Delegate the container check to is_list, then coerce each member.
    return list(map(is_float, is_list(value, min, max)))
|
Check that the value is a list of strings.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a string.
>>> vtor = Validator()
>>> vtor.check('string_list', ())
[]
>>> vtor.check('string_list', [])
[]
>>> vtor.check('string_list', ('a', 'b'))
['a', 'b']
>>> vtor.check('string_list', ['a', 1]) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "1" is of the wrong type.
>>> vtor.check('string_list', 'hello') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "hello" is of the wrong type.
def is_string_list(value, min=None, max=None):
    """
    Check that the value is a list of strings.
    You can optionally specify the minimum and maximum number of members.
    Each list member is checked that it is a string.
    >>> vtor = Validator()
    >>> vtor.check('string_list', ())
    []
    >>> vtor.check('string_list', [])
    []
    >>> vtor.check('string_list', ('a', 'b'))
    ['a', 'b']
    >>> vtor.check('string_list', ['a', 1]) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "1" is of the wrong type.
    >>> vtor.check('string_list', 'hello') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "hello" is of the wrong type.
    """
    # A bare string would otherwise pass is_list's length check as a
    # sequence of characters, so reject it up front.
    if isinstance(value, string_types):
        raise VdtTypeError(value)
    return list(map(is_string, is_list(value, min, max)))
|
Check that the value is a list of IP addresses.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an IP address.
>>> vtor = Validator()
>>> vtor.check('ip_addr_list', ())
[]
>>> vtor.check('ip_addr_list', [])
[]
>>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
['1.2.3.4', '5.6.7.8']
>>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP
Traceback (most recent call last):
VdtValueError: the value "a" is unacceptable.
def is_ip_addr_list(value, min=None, max=None):
    """
    Check that the value is a list of IP addresses.
    You can optionally specify the minimum and maximum number of members.
    Each list member is checked that it is an IP address.
    >>> vtor = Validator()
    >>> vtor.check('ip_addr_list', ())
    []
    >>> vtor.check('ip_addr_list', [])
    []
    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
    ['1.2.3.4', '5.6.7.8']
    >>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueError: the value "a" is unacceptable.
    """
    # Delegate the container check to is_list, then validate each member.
    return list(map(is_ip_addr, is_list(value, min, max)))
|
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
A minimum of greater than one will fail if the user only supplies a
string.
>>> vtor = Validator()
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
def force_list(value, min=None, max=None):
    """
    Check that a value is a list, coercing strings into
    a list with one member. Useful where users forget the
    trailing comma that turns a single value into a list.
    You can optionally specify the minimum and maximum number of members.
    A minimum of greater than one will fail if the user only supplies a
    string.
    >>> vtor = Validator()
    >>> vtor.check('force_list', ())
    []
    >>> vtor.check('force_list', [])
    []
    >>> vtor.check('force_list', 'hello')
    ['hello']
    """
    # Wrap any non-sequence (including strings) in a single-member list
    # before the normal list validation.
    members = value if isinstance(value, (list, tuple)) else [value]
    return is_list(members, min, max)
|
Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> vtor = Validator()
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
This test requires an elaborate setup, because of a change in error string
output from the interpreter between Python 2.2 and 2.3 .
>>> res_seq = (
... 'passed an incorrect value "',
... 'yoda',
... '" for parameter "mixed_list".',
... )
>>> res_str = "'".join(res_seq)
>>> try:
... vtor.check('mixed_list("yoda")', ('a'))
... except VdtParamError as err:
... str(err) == res_str
1
def is_mixed_list(value, *args):
    """
    Check that the value is a list.
    Allow specifying the type of each member.
    Work on lists of specific lengths.
    You specify each member as a positional argument specifying type
    Each type should be one of the following strings :
    'integer', 'float', 'ip_addr', 'string', 'boolean'
    So you can specify a list of two strings, followed by
    two integers as :
    mixed_list('string', 'string', 'integer', 'integer')
    The length of the list must match the number of positional
    arguments you supply.
    >>> vtor = Validator()
    >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
    >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
    1
    >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
    1
    >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "b" is of the wrong type.
    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
    >>> vtor.check(mix_str, 0) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    This test requires an elaborate setup, because of a change in error string
    output from the interpreter between Python 2.2 and 2.3 .
    >>> res_seq = (
    ...     'passed an incorrect value "',
    ...     'yoda',
    ...     '" for parameter "mixed_list".',
    ...     )
    >>> res_str = "'".join(res_seq)
    >>> try:
    ...     vtor.check('mixed_list("yoda")', ('a'))
    ... except VdtParamError as err:
    ...     str(err) == res_str
    1
    """
    # The value must be sized; anything without a length is the wrong type.
    try:
        length = len(value)
    except TypeError:
        raise VdtTypeError(value)
    # The member count must match the number of declared member checks.
    if length < len(args):
        raise VdtValueTooShortError(value)
    if length > len(args):
        raise VdtValueTooLongError(value)
    # Apply each declared check to the corresponding member; an unknown
    # check name surfaces as a parameter error.
    try:
        return [fun_dict[name](member) for name, member in zip(args, value)]
    except KeyError as e:
        raise VdtParamError('mixed_list', e)
|
This check matches the value to any of a set of options.
>>> vtor = Validator()
>>> vtor.check('option("yoda", "jedi")', 'yoda')
'yoda'
>>> vtor.check('option("yoda", "jedi")', 'jed') # doctest: +SKIP
Traceback (most recent call last):
VdtValueError: the value "jed" is unacceptable.
>>> vtor.check('option("yoda", "jedi")', 0) # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
def is_option(value, *options):
    """
    This check matches the value to any of a set of options.
    >>> vtor = Validator()
    >>> vtor.check('option("yoda", "jedi")', 'yoda')
    'yoda'
    >>> vtor.check('option("yoda", "jedi")', 'jed') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueError: the value "jed" is unacceptable.
    >>> vtor.check('option("yoda", "jedi")', 0) # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    """
    # Options are string-valued, so the value must be a string.
    if not isinstance(value, string_types):
        raise VdtTypeError(value)
    if value not in options:
        raise VdtValueError(value)
    return value
|
Usage: check(check, value)
Arguments:
check: string representing check to apply (including arguments)
value: object to be checked
Returns value, converted to correct type if necessary
If the check fails, raises a ``ValidateError`` subclass.
>>> vtor = Validator()
>>> vtor.check('yoda', '') # doctest: +SKIP
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('yoda()', '') # doctest: +SKIP
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('string(default="")', '', missing=True)
''
def check(self, check, value, missing=False):
    """
    Usage: check(check, value)
    Arguments:
    check: string representing check to apply (including arguments)
    value: object to be checked
    Returns value, converted to correct type if necessary
    If the check fails, raises a ``ValidateError`` subclass.
    >>> vtor = Validator()
    >>> vtor.check('yoda', '') # doctest: +SKIP
    Traceback (most recent call last):
    VdtUnknownCheckError: the check "yoda" is unknown.
    >>> vtor.check('yoda()', '') # doctest: +SKIP
    Traceback (most recent call last):
    VdtUnknownCheckError: the check "yoda" is unknown.
    >>> vtor.check('string(default="")', '', missing=True)
    ''
    """
    name, args, kwargs, default = self._parse_with_caching(check)
    if missing:
        if default is None:
            # no information needed here - to be handled by caller
            raise VdtMissingValue()
        # substitute the declared default for the missing value
        value = self._handle_none(default)
    # None propagates unchecked
    return None if value is None else self._check_value(value, name, args, kwargs)
|
Unquote a value if necessary.
def _unquote(self, val):
"""Unquote a value if necessary."""
if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
val = val[1:-1]
return val
|
Take apart a ``keyword=list('val, 'val')`` type string.
def _list_handle(self, listmatch):
    """Take apart a ``keyword=list('val, 'val')`` type string."""
    name = listmatch.group(1)
    args = listmatch.group(2)
    # unquote each member found inside the parentheses
    members = [self._unquote(arg) for arg in self._list_members.findall(args)]
    return name, members
|
Given a check, return the default value for the check
(converted to the right type).
If the check doesn't specify a default value then a
``KeyError`` will be raised.
def get_default_value(self, check):
    """
    Given a check, return the default value for the check
    (converted to the right type).
    If the check doesn't specify a default value then a
    ``KeyError`` will be raised.
    """
    name, args, kwargs, default = self._parse_with_caching(check)
    if default is None:
        raise KeyError('Check "%s" has no default value.' % check)
    value = self._handle_none(default)
    # a None default passes through without type conversion
    return None if value is None else self._check_value(value, name, args, kwargs)
|
A constant value HDU will only be recognized as such if the header
contains a valid PIXVALUE and NAXIS == 0.
def match_header(cls, header):
    """A constant value HDU will only be recognized as such if the header
    contains a valid PIXVALUE and NAXIS == 0.
    """
    # must first look like the base HDU type
    if not super(_ConstantValueImageBaseHDU, cls).match_header(header):
        return False
    # PIXVALUE must be numeric and the HDU must carry no data axes
    pixvalue = header.get('PIXVALUE')
    if not (isinstance(pixvalue, float) or _is_int(pixvalue)):
        return False
    return header.get('NAXIS', 0) == 0
|
Verify that the HDU's data is a constant value array.
def _check_constant_value_data(self, data):
"""Verify that the HDU's data is a constant value array."""
arrayval = data.flat[0]
if np.all(data == arrayval):
return arrayval
return None
|
Return currently selected index (or -1)
def get_current_index(self):
    """ Return currently selected index (or -1) """
    # The listbox API returns a tuple of strings; convert to int.
    selection = self.__lb.curselection()
    if selection:
        return int(selection[0])
    return -1
|
Input GEIS files "input" will be read and a HDUList object will
be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.
The user can use the writeto method to write the HDUList object to
a FITS file.
def convert(input):
    """Input GEIS files "input" will be read and a HDUList object will
    be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.
    The user can use the writeto method to write the HDUList object to
    a FITS file.

    Raises ValueError for an ill-formed input file name or an
    unsupported platform.
    """
    global dat
    cardLen = fits.Card.length
    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        # FIX: raising a bare string is a TypeError in Python 3; raise a
        # real exception so the intended message reaches the caller
        raise ValueError("Illegal input GEIS file name %s" % input)
    data_file = input[:-1]+'d'
    _os = sys.platform
    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen+1
    else:
        # FIX: same string-raise bug as above
        raise ValueError("Platform %s is not supported (yet)." % _os)
    end_card = 'END'+' '* (cardLen-3)
    # open input file
    im = open(input)
    # Generate the primary HDU by reading header cards until END
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))
    phdr = fits.Header(cards)
    im.close()
    # Determine starting point for adding Group Parameter Block keywords to Primary header
    phdr_indx = phdr.index('PSIZE')
    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x,y:x*y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8
    # decode the group parameter definitions,
    # group parameters will become extension table
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']
    formats = []
    bools = []
    floats = []
    cols = [] # column definitions used for extension table
    cols_dict = {} # provides name access to Column defs
    _range = range(1, pcount+1)
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]
        # collect boolean keywords since they need special attention later
        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)
        # identify keywords which require conversion to special units
        if ptype in kw_DOUBLE:
            _type = 'DOUBLE'
        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype,fmt))
        # Set up definitions for use in creating the group-parameter block table
        nrpt = ''
        nbits = str(int(_bytes)*8)
        if 'CHAR' in _type:
            nrpt = _bytes
            nbits = _bytes
        afmt = cols_fmt[_type]+ nbits
        if 'LOGICAL' in _type:
            afmt = cols_fmt[_type]
        cfmt = cols_pfmt[_type]+nrpt
        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
        cols.append(cols_dict[ptype]) # This keeps the columns in order
    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.BITPIX2DTYPE[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)
    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0
    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr)-1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]
    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)
    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    errormsg = ""
    # Define data array for all groups
    arr_shape = _naxis[:]
    arr_shape[0] = gcount
    arr_stack = numpy.zeros(arr_shape,dtype=_code)
    loc = 0
    for k in range(gcount):
        # FIX: numpy.fromstring is deprecated/removed in modern numpy;
        # frombuffer + copy yields the same writable array
        ext_dat = numpy.frombuffer(dat[loc:loc+data_size], dtype=_code).copy()
        ext_dat = ext_dat.reshape(_shape)
        if _uint16:
            ext_dat += _bzero
        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        # and being read in on big-endian or vice-versa.
        if _code.find('float') >= 0 and \
           (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING: =\n"
            errormsg += "= Input image: =\n"
            errormsg += input+"[%d]\n"%(k+1)
            errormsg += "= had floating point data values =\n"
            errormsg += "= of NaN and/or Inf. =\n"
            errormsg += "===================================\n"
        elif _code.find('int') >= 0:
            # Check INT data for max values
            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING: =\n"
                errormsg += "= Input image: =\n"
                errormsg += input+"[%d]\n"%(k+1)
                errormsg += "= had integer data values =\n"
                errormsg += "= with maximum bitvalues. =\n"
                errormsg += "===================================\n"
        arr_stack[k] = ext_dat
        # FIX: frombuffer instead of the removed fromstring (see above)
        rec = numpy.frombuffer(dat[loc+data_size:loc+group_size], dtype=formats).copy()
        loc += group_size
        # Add data from this GPB to table
        for i in range(1, pcount+1):
            val = rec[0][i-1]
            if i in bools:
                if val:
                    val = 'T'
                else:
                    val = 'F'
            cols[i-1].array[k] = val
        # Based on the first group, add GPB keywords to PRIMARY header
        if k == 0:
            # Create separate PyFITS Card objects for each entry in 'rec'
            # and update Primary HDU with these keywords after PSIZE
            for i in range(1, pcount+1):
                val = rec[0][i-1]
                if val.dtype.kind == 'S':
                    val = val.decode('ascii')
                if i in bools:
                    if val:
                        val = True
                    else:
                        val = False
                elif i in floats:
                    # use fromstring, format in Card is deprecated in pyfits 0.9
                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
                    _card = fits.Card.fromstring(_str)
                else:
                    # FIX: use the modern 'keyword' argument for consistency
                    # with the fits.Card construction below ('key' was the
                    # pyfits-era spelling)
                    _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])
                phdr.insert(phdr_indx+i, _card)
            # deal with bscale/bzero
            if (_bscale != 1 or _bzero != 0):
                phdr['BSCALE'] = _bscale
                phdr['BZERO'] = _bzero
    # Define new table based on Column definitions
    ext_table = fits.TableHDU.from_columns(cols)
    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
    # Add column descriptions to header of table extension to match stwfits output
    for i in range(len(key)):
        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))
    if errormsg != "":
        errormsg += "===================================\n"
        errormsg += "= This file may have been =\n"
        errormsg += "= written out on a platform =\n"
        errormsg += "= with a different byte-order. =\n"
        errormsg += "= =\n"
        errormsg += "= Please verify that the values =\n"
        errormsg += "= are correct or apply the =\n"
        errormsg += "= '.byteswap()' method. =\n"
        errormsg += "===================================\n"
        print(errormsg)
    f1.close()
    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
    hdulist.append(ext_table)
    stsci2(hdulist,input)
    return hdulist
|
loads configuration from some predictable locations.
:return: the config.
def _loadConfig(self):
    """
    loads configuration from some predictable locations.
    :return: the config.
    """
    configPath = path.join(self._getConfigPath(), self._name + ".yml")
    # no stored config yet: persist the defaults and use them
    if not os.path.exists(configPath):
        defaultConfig = self.loadDefaultConfig()
        self._storeConfig(defaultConfig, configPath)
        return defaultConfig
    self.logger.warning("Loading config from " + configPath)
    with open(configPath, 'r') as yml:
        return yaml.load(yml, Loader=yaml.FullLoader)
|
Writes the config to the configPath.
:param config a dict of config.
:param configPath the path to the file to write to, intermediate dirs will be created as necessary.
def _storeConfig(self, config, configPath):
    """
    Writes the config to the configPath.
    :param config a dict of config.
    :param configPath the path to the file to write to, intermediate dirs will be created as necessary.
    """
    self.logger.info("Writing to " + str(configPath))
    targetDir = os.path.dirname(configPath)
    os.makedirs(targetDir, exist_ok=True)
    with open(configPath, 'w') as yml:
        yaml.dump(config, yml, default_flow_style=False)
|
Gets the currently configured config path.
:return: the path, raises ValueError if it doesn't exist.
def _getConfigPath(self):
"""
Gets the currently configured config path.
:return: the path, raises ValueError if it doesn't exist.
"""
confHome = environ.get('VIBE_CONFIG_HOME')
return confHome if confHome is not None else path.join(path.expanduser("~"), '.vibe')
|
Configures the python logging system to log to a debug file and to stdout for warn and above.
:return: the base logger.
def configureLogger(self):
    """
    Configures the python logging system to log to a debug file and to stdout for warn and above.
    :return: the base logger.
    """
    level = logging.DEBUG if self.isDebugLogging() else logging.INFO
    # create recorder app root logger
    logger = logging.getLogger(self._name)
    logger.setLevel(level)
    # rotating file handler in the config directory, at the base level
    logFile = path.join(self._getConfigPath(), self._name + '.log')
    fileHandler = handlers.RotatingFileHandler(logFile,
                                               maxBytes=10 * 1024 * 1024, backupCount=10)
    fileHandler.setLevel(level)
    # console only shows warnings and above
    consoleHandler = logging.StreamHandler()
    consoleHandler.setLevel(logging.WARN)
    # same format everywhere
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s')
    for handler in (fileHandler, consoleHandler):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger
|
Translate IBM-format floating point numbers (as bytes) to IEEE 754
64-bit floating point format (as Python float).
def ibm_to_ieee(ibm):
    '''
    Translate IBM-format floating point numbers (as bytes) to IEEE 754
    64-bit floating point format (as Python float).
    '''
    # IBM mainframe: sign * 0.mantissa * 16 ** (exponent - 64)
    # Python (IEEE): sign * 1.mantissa * 2 ** (exponent - 1023)
    # Right-pad to 8 bytes; struct itself rejects bizarre sizes, so no
    # explicit length check is needed.
    raw = ibm.ljust(8, b'\x00')
    (bits,) = struct.unpack('>Q', raw)
    # IBM layout: 1-bit sign, 7-bits exponent, 56-bits mantissa
    sign = bits & 0x8000000000000000
    exponent = (bits >> 56) & 0x7f
    mantissa = bits & 0x00ffffffffffffff
    if mantissa == 0:
        first = raw[0:1]
        if first == b'\x00':
            return 0.0
        if first in b'_.ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            # SAS missing-value markers map to NaN
            return float('nan')
        raise ValueError('Neither zero nor NaN: %r' % raw)
    # The IBM exponent is base 16, so up to 3 leading zero-bits may sit in
    # the binary mantissa; IEEE wants none. bit_length of the top 3 bits
    # gives exactly the shift the original if/elif ladder computed.
    shift = (mantissa >> 53).bit_length()
    mantissa >>= shift
    # drop the bit left of the binary point - implicit in IEEE
    mantissa &= 0xffefffffffffffff
    # excess-64 -> excess-1023, base 16 -> base 2 (x4), and account for the
    # implicit leading 1 plus the alignment shift
    exponent = ((exponent - 65) << 2) + shift + 1023
    # sign bit was never shifted, so it is already in place
    ieee = sign | (exponent << 52) | mantissa
    return struct.unpack('>d', struct.pack('>Q', ieee))[0]
|
Translate Python floating point numbers to IBM-format (as bytes).
def ieee_to_ibm(ieee):
    '''
    Translate Python floating point numbers to IBM-format (as bytes).
    '''
    # Python (IEEE): sign * 1.mantissa * 2 ** (exponent - 1023)
    # IBM mainframe: sign * 0.mantissa * 16 ** (exponent - 64)
    if ieee == 0.0:
        return b'\x00' * 8
    if ieee is None or math.isnan(ieee):
        # SAS missing value marker
        return b'_' + b'\x00' * 7
    if math.isinf(ieee):
        raise NotImplementedError('Cannot convert infinity')
    (bits,) = struct.unpack('>Q', struct.pack('>d', ieee))
    sign = bits >> 63                           # 1-bit sign
    exponent = ((bits >> 52) & 0x7ff) - 1023    # 11-bits exponent
    mantissa = bits & 0x000fffffffffffff        # 52-bits significand
    if exponent > 248:
        raise Overflow('Cannot store magnitude more than ~ 16 ** 63 as IBM-format')
    if exponent < -260:
        raise Underflow('Cannot store magnitude less than ~ 16 ** -65 as IBM-format')
    # Make IEEE's implicit leading 1 explicit (IBM uses 0.mantissa);
    # the exponent is incremented below to compensate.
    mantissa |= 0x0010000000000000
    # Base-2 exponent -> base-16: divide by 4 and fold the remainder back
    # into the mantissa so no information is lost.
    quotient, remainder = divmod(exponent, 4)
    mantissa <<= remainder
    # +1 for the implicit-bit adjustment above, +64 for IBM's excess-64.
    exponent = quotient + 1 + 64
    # IBM layout: 1-bit sign, 7-bits exponent, 56-bits mantissa.
    return struct.pack('>Q', (sign << 63) | (exponent << 56) | mantissa)
|
Serialize ``columns`` to a JSON formatted ``bytes`` object.
def dumps(columns):
    '''
    Serialize ``columns`` to a JSON formatted ``bytes`` object.
    '''
    buffer = BytesIO()
    dump(columns, buffer)
    # getvalue() returns the whole buffer, replacing the seek/read dance
    return buffer.getvalue()
|
Parse the 3-line (240-byte) header of a SAS XPORT file.
def match(header):
    '''
    Parse the 3-line (240-byte) header of a SAS XPORT file.
    '''
    mo = Library.header_re.match(header)
    if mo is None:
        raise ValueError('Not a SAS Version 5 or 6 XPORT file')
    metadata = {
        'created': strptime(mo['created']),
        'modified': strptime(mo['modified']),
        'sas_version': float(mo['version']),
        'os_version': mo['os'].decode().strip(),
    }
    return metadata
|
Parse the 4-line (320-byte) library member header.
def header_match(cls, header):
    '''
    Parse the 4-line (320-byte) library member header.

    Returns a dict of member metadata; raises ``ValueError`` when the
    bytes do not match ``cls.header_re``.
    '''
    parsed = cls.header_re.match(header)
    if parsed is None:
        raise ValueError(f'Expected {cls.header_re.pattern!r}, got {header!r}')
    metadata = {
        'name': parsed['name'].decode().strip(),
        'label': parsed['label'].decode().strip(),
        'type': parsed['type'].decode().strip(),
        'created': strptime(parsed['created']),
        'modified': strptime(parsed['modified']),
        'sas_version': float(parsed['version']),
        'os_version': parsed['os'].decode().strip(),
        'namestr_size': parsed['descriptor_size'],
    }
    return metadata
|
Parse a member namestrs header (1 line, 80 bytes).
def header_match(cls, data):
    '''
    Parse a member namestrs header (1 line, 80 bytes).

    Parameters
    ----------
    data : bytes
        The 80-byte namestrs header record.

    Returns
    -------
    int
        The count of variables (namestr records) that follow.

    Raises
    ------
    ValueError
        If ``data`` does not match ``cls.header_re``.
    '''
    mo = cls.header_re.match(data)
    if mo is None:
        # Consistent with the member-header parser above: report what
        # was expected instead of crashing on subscripting ``None``.
        raise ValueError(f'Expected {cls.header_re.pattern!r}, got {data!r}')
    return int(mo['n_variables'])
|
Parse variable metadata for a XPORT file member.
def readall(cls, member, size):
    '''
    Parse variable metadata for a XPORT file member.

    Parameters
    ----------
    member : library member
        Object whose ``library.fp`` is the open file positioned at the
        namestrs header, and whose ``library.LINE`` is the record length.
    size : int
        Size in bytes of one namestr field (from the member header).

    Returns
    -------
    list of Variable
        One Variable per namestr record read from the stream.
    '''
    fp = member.library.fp
    LINE = member.library.LINE
    # The header record states how many namestr fields follow.
    n = cls.header_match(fp.read(LINE))
    namestrs = [fp.read(size) for i in range(n)]
    # Each namestr field is 140 bytes long, but the fields are
    # streamed together and broken in 80-byte pieces. If the last
    # byte of the last namestr field does not fall in the last byte
    # of the 80-byte record, the record is padded with ASCII blanks
    # to 80 bytes.
    remainder = n * size % LINE
    if remainder:
        # Use LINE rather than a hard-coded 80 so the record length is
        # defined in exactly one place (member.library.LINE).
        padding = LINE - remainder
        fp.read(padding)
    info = [cls.unpack(s) for s in namestrs]
    for d in info:
        d['format'] = Format(**d['format'])
        d['iformat'] = InputFormat(**d['iformat'])
    return [Variable(**d) for d in info]
|
Determine Julian day count from Islamic date
def to_jd(year, month, day):
    '''Determine Julian day count from Islamic date'''
    months_passed = ceil(29.5 * (month - 1))
    years_passed = (year - 1) * 354
    leap_days = trunc((3 + (11 * year)) / 30)
    return day + months_passed + years_passed + leap_days + EPOCH - 1
|
Calculate Islamic date from Julian day
def from_jd(jd):
    '''Calculate Islamic date from Julian day'''
    # Snap to the half-day boundary (Julian days begin at noon).
    jd = trunc(jd) + 0.5
    elapsed = jd - EPOCH
    year = trunc((30 * elapsed + 10646) / 10631)
    year_start = to_jd(year, 1, 1)
    month = min(12, ceil((jd - (29 + year_start)) / 29.5) + 1)
    day = int(jd - to_jd(year, month, 1)) + 1
    return (year, month, day)
|
Go over all widgets and let them know they have been edited
recently and they need to check for any trigger actions. This
would be used right after all the widgets have their values
set or forced (e.g. via setAllEntriesFromParList).
def checkAllTriggers(self, action):
    """ Notify every parameter widget that it was (effectively) just
    edited so each one re-checks its trigger actions.  Meant for use
    right after all widget values have been set or forced in bulk
    (e.g. via setAllEntriesFromParList). """
    for widget_entry in self.entryNo:
        widget_entry.widgetEdited(action=action, skipDups=False)
|
Did something which requires a new look. Move scrollbar up.
This often needs to be delayed a bit however, to let other
events in the queue through first.
def freshenFocus(self):
    """ Something changed that requires a fresh look; scroll the view
    back to the top.  The scroll is deferred slightly so that other
    events already in the queue get through first. """
    top = self.top
    top.update_idletasks()
    # The 10 ms delay lets queued events run before the view moves.
    top.after(10, self.setViewAtTop)
|
Mouse Wheel - under tkinter we seem to need Tk v8.5+ for this
def mwl(self, event):
    """Handle a mouse-wheel event (needs Tk v8.5+ under tkinter)."""
    # Linux reports wheel motion as button 4 (up) / 5 (down); other
    # platforms put the direction, with reversed sign, in event.delta.
    if event.num == 4:
        direction = -1
    elif event.num == 5:
        direction = 1
    else:
        direction = -event.delta
    self.top.f.canvas.yview_scroll(direction * self._tmwm, 'units')
|
Set focus to next item in sequence
def focusNext(self, event):
    """Set focus to next item in sequence"""
    widget = event.widget
    try:
        widget.tk_focusNext().focus_set()
    except TypeError:
        # Some tkinter builds raise TypeError here; ask Tcl directly
        # (see tkinter's tk_focusNext source for the original version).
        path = widget.tk.call('tk_focusNext', widget._w)
        widget._nametowidget(str(path)).focus_set()
|
Set focus to previous item in sequence
def focusPrev(self, event):
    """Set focus to previous item in sequence"""
    widget = event.widget
    try:
        widget.tk_focusPrev().focus_set()
    except TypeError:
        # Some tkinter builds raise TypeError here; ask Tcl directly
        # (see tkinter's tk_focusPrev source for the original version).
        path = widget.tk.call('tk_focusPrev', widget._w)
        widget._nametowidget(str(path)).focus_set()
|
Scroll the panel down to ensure widget with focus to be visible
Tracks the last widget that doScroll was called for and ignores
repeated calls. That handles the case where the focus moves not
between parameter entries but to someplace outside the hierarchy.
In that case the scrolling is not expected.
Returns false if the scroll is ignored, else true.
def doScroll(self, event):
    """Scroll the panel so the widget gaining focus becomes visible.

    Remembers the last widget this was called for and ignores repeated
    calls for it; that covers focus moving somewhere outside the
    parameter-entry hierarchy, where no scrolling is wanted.
    Returns FALSE when the request is ignored, TRUE otherwise.
    """
    focused = event.widget
    # Repeated notification for the same widget: nothing to do.
    if focused is self.lastFocusWidget:
        return FALSE
    self.lastFocusWidget = focused
    if focused is None:
        return TRUE
    canvas = self.top.f.canvas
    # Widget and canvas extents in root-window coordinates.
    top_w = focused.winfo_rooty()
    bot_w = top_w + focused.winfo_height()
    top_c = canvas.winfo_rooty()
    bot_c = top_c + canvas.winfo_height()
    step = self.yscrollincrement
    if top_w < top_c:
        # Widget sits above the view: scroll up by a negative number of
        # units, rounded away from zero (works without integer division).
        canvas.yview_scroll(int((top_w - top_c - step + 1.) / step), "units")
    elif bot_c < bot_w:
        # Widget sits below the view: scroll down, rounded up.
        canvas.yview_scroll(int((bot_w - bot_c + step - 1.) / step), "units")
    return TRUE
|
Handle the situation where two par lists do not match.
This is meant to allow subclasses to override. Note that this only
handles "missing" pars and "extra" pars, not wrong-type pars.
def _handleParListMismatch(self, probStr, extra=False):
""" Handle the situation where two par lists do not match.
This is meant to allow subclasses to override. Note that this only
handles "missing" pars and "extra" pars, not wrong-type pars. """
errmsg = 'ERROR: mismatch between default and current par lists ' + \
'for task "'+self.taskName+'"'
if probStr:
errmsg += '\n\t'+probStr
errmsg += '\n(try: "unlearn '+self.taskName+'")'
print(errmsg)
return False
|
This creates self.defaultParamList. It also does some checks
on the paramList, sets its order if needed, and deletes any extra
or unknown pars if found. We assume the order of self.defaultParamList
is the correct order.
def _setupDefaultParamList(self):
""" This creates self.defaultParamList. It also does some checks
on the paramList, sets its order if needed, and deletes any extra
or unknown pars if found. We assume the order of self.defaultParamList
is the correct order. """
# Obtain the default parameter list
self.defaultParamList = self._taskParsObj.getDefaultParList()
theParamList = self._taskParsObj.getParList()
# Lengths are probably equal but this isn't necessarily an error
# here, so we check for differences below.
if len(self.defaultParamList) != len(theParamList):
# whoa, lengths don't match (could be some missing or some extra)
pmsg = 'Current list not same length as default list'
if not self._handleParListMismatch(pmsg):
return False
# convert current par values to a dict of { par-fullname:par-object }
# for use below
ourpardict = {}
for par in theParamList: ourpardict[par.fullName()] = par
# Sort our paramList according to the order of the defaultParamList
# and repopulate the list according to that order. Create sortednames.
sortednames = [p.fullName() for p in self.defaultParamList]
# Rebuild par list sorted into correct order. Also find/flag any
# missing pars or any extra/unknown pars. This automatically deletes
# "extras" by not adding them to the sorted list in the first place.
migrated = []
newList = []
for fullName in sortednames:
if fullName in ourpardict:
newList.append(ourpardict[fullName])
migrated.append(fullName) # make sure all get moved over
else: # this is a missing par - insert the default version
theDfltVer = \
[p for p in self.defaultParamList if p.fullName()==fullName]
newList.append(copy.deepcopy(theDfltVer[0]))
# Update! Next line writes to the self._taskParsObj.getParList() obj
theParamList[:] = newList # fill with newList, keep same mem pointer
# See if any got left out
extras = [fn for fn in ourpardict if not fn in migrated]
for fullName in extras:
# this is an extra/unknown par - let subclass handle it
if not self._handleParListMismatch('Unexpected par: "'+\
fullName+'"', extra=True):
return False
print('Ignoring unexpected par: "'+p+'"')
# return value indicates that all is well to continue
return True
|
Make an entire section (minus skipList items) either active or
inactive. sectionName is the same as the param's scope.
def _toggleSectionActiveState(self, sectionName, state, skipList):
""" Make an entire section (minus skipList items) either active or
inactive. sectionName is the same as the param's scope. """
# Get model data, the list of pars
theParamList = self._taskParsObj.getParList()
# Loop over their assoc. entries
for i in range(self.numParams):
if theParamList[i].scope == sectionName:
if skipList and theParamList[i].name in skipList:
# self.entryNo[i].setActiveState(True) # these always active
pass # if it started active, we don't need to reactivate it
else:
self.entryNo[i].setActiveState(state)
|
Save the parameter settings to a user-specified file. Any
changes here must be coordinated with the corresponding tpar save_as
function.
def saveAs(self, event=None):
    """ Save the parameter settings to a user-specified file.

    Prompts for a destination (a native dialog when available,
    otherwise the package's persistent file dialog), validates all
    child (pset) windows and local entries, then writes the file.
    Any changes here must be coordinated with the corresponding tpar
    save_as function.

    Parameters
    ----------
    event : optional
        Tk event object; unused, present for binding compatibility.
    """
    self.debug('Clicked Save as...')
    # On Linux Pers..Dlg causes the cwd to change, so get a copy of current
    # so it can be restored on every exit path below.
    curdir = os.getcwd()
    # The user wishes to save to a different name
    writeProtChoice = self._writeProtectOnSaveAs
    if capable.OF_TKFD_IN_EPAR:
        # Prompt using native looking dialog
        fname = asksaveasfilename(parent=self.top,
                                  title='Save Parameter File As',
                                  defaultextension=self._defSaveAsExt,
                                  initialdir=os.path.dirname(self._getSaveAsFilter()))
    else:
        # Prompt. (could use tkinter's FileDialog, but this one is prettier)
        # initWProtState is only used in the 1st call of a session
        from . import filedlg
        fd = filedlg.PersistSaveFileDialog(self.top,
                     "Save Parameter File As", self._getSaveAsFilter(),
                     initWProtState=writeProtChoice)
        if fd.Show() != 1:
            fd.DialogCleanup()
            os.chdir(curdir) # in case file dlg moved us
            return
        fname = fd.GetFileName()
        writeProtChoice = fd.GetWriteProtectChoice()
        fd.DialogCleanup()
    if not fname: return # canceled
    # First check the child parameters, aborting save if
    # invalid entries were encountered
    if self.checkSetSaveChildren():
        os.chdir(curdir) # in case file dlg moved us
        return
    # Run any subclass-specific steps right before the save
    self._saveAsPreSave_Hook(fname)
    # Verify all the entries (without save), keeping track of the invalid
    # entries which have been reset to their original input values
    self.badEntriesList = self.checkSetSaveEntries(doSave=False)
    # If there were invalid entries, prepare the message dialog
    if self.badEntriesList:
        ansOKCANCEL = self.processBadEntries(self.badEntriesList,
                      self.taskName)
        if not ansOKCANCEL:
            os.chdir(curdir) # in case file dlg moved us
            return
    # If there were no invalid entries or the user says OK, finally
    # save to their stated file. Since we have already processed the
    # bad entries, there should be none returned.
    mstr = "TASKMETA: task="+self.taskName+" package="+self.pkgName
    if self.checkSetSaveEntries(doSave=True, filename=fname, comment=mstr,
                                set_ro=writeProtChoice,
                                overwriteRO=True):
        os.chdir(curdir) # in case file dlg moved us
        raise Exception("Unexpected bad entries for: "+self.taskName)
    # Run any subclass-specific steps right after the save
    self._saveAsPostSave_Hook(fname)
    os.chdir(curdir)
|
Pop up the help in a browser window. By default, this tries to
show the help for the current task. With the option arguments, it can
be used to show any help string.
def htmlHelp(self, helpString=None, title=None, istask=False, tag=None):
    """ Pop up the help in a browser window.  By default this shows
    the help for the current task; with the optional arguments it can
    be used to show any help string.

    Parameters
    ----------
    helpString : str, optional
        Text or URL to display; defaults to the current task's help.
    title : str, optional
        Window/page title; defaults to the task name.
    istask : bool
        True when helpString is task help (may already be HTML).
    tag : str, optional
        Anchor appended to a URL (skipped if the URL already has one).
    """
    # Check the help string. If it turns out to be a URL, launch that,
    # if not, dump it to a quick and dirty tmp html file to make it
    # presentable, and pass that file name as the URL.
    if not helpString:
        helpString = self.getHelpString(self.pkgName+'.'+self.taskName)
    if not title:
        title = self.taskName
    lwr = helpString.lower()
    # str.startswith accepts a tuple - one call instead of a chained "or"
    if lwr.startswith(("http:", "https:", "file:")):
        url = helpString
        if tag and url.find('#') < 0:
            url += '#'+tag
        # print('LAUNCHING: '+url) # DBG
        irafutils.launchBrowser(url, subj=title)
    else:
        # Write it to a temp HTML file to display; the context manager
        # guarantees the handle is closed even if a write fails.
        (fd, fname) = tempfile.mkstemp(suffix='.html', prefix='editpar_')
        os.close(fd)
        with open(fname, 'w') as f:
            if istask and self._knowTaskHelpIsHtml:
                f.write(helpString)
            else:
                f.write('<html><head><title>'+title+'</title></head>\n')
                f.write('<body><h3>'+title+'</h3>\n')
                f.write('<pre>\n'+helpString+'\n</pre></body></html>')
        irafutils.launchBrowser("file://"+fname, subj=title)
|
Invoke task/epar/etc. help and put the page in a window.
This same logic is used for GUI help, task help, log msgs, etc.
def _showAnyHelp(self, kind, tag=None):
    """ Invoke task/epar/etc. help and put the page in a window.
    This same logic is used for GUI help, task help, log msgs, etc.

    Parameters
    ----------
    kind : {'epar', 'task', 'log'}
        Which help content to display.
    tag : str, optional
        Anchor tag passed through to htmlHelp for task help.
    """
    # sanity check
    assert kind in ('epar', 'task', 'log'), 'Unknown help kind: '+str(kind)
    #-----------------------------------------
    # See if they'd like to view in a browser
    #-----------------------------------------
    # Task help that is already HTML always goes to the browser,
    # regardless of the _showHelpInBrowser preference.
    if self._showHelpInBrowser or (kind == 'task' and
                                   self._knowTaskHelpIsHtml):
        if kind == 'epar':
            self.htmlHelp(helpString=self._appHelpString,
                          title='Parameter Editor Help')
        if kind == 'task':
            self.htmlHelp(istask=True, tag=tag)
        if kind == 'log':
            self.htmlHelp(helpString='\n'.join(self._msgHistory),
                          title=self._appName+' Event Log')
        return
    #-----------------------------------------
    # Now try to pop up the regular Tk window
    #-----------------------------------------
    wins = {'epar':self.eparHelpWin,
            'task':self.irafHelpWin,
            'log': self.logHistWin, }
    window = wins[kind]
    try:
        # If the window already exists, bring it forward and stop.
        if window.state() != NORMAL:
            window.deiconify()
        window.tkraise()
        return
    except (AttributeError, TclError):
        # window is None (never built) or was destroyed - rebuild below
        pass
    #---------------------------------------------------------
    # That didn't succeed (window is still None), so build it
    #---------------------------------------------------------
    if kind == 'epar':
        self.eparHelpWin = self.makeHelpWin(self._appHelpString,
                                            title='Parameter Editor Help')
    if kind == 'task':
        # Acquire the task help as a string
        # Need to include the package name for the task to
        # avoid name conflicts with tasks from other packages. WJH
        self.irafHelpWin = self.makeHelpWin(self.getHelpString(
                                            self.pkgName+'.'+self.taskName))
    if kind == 'log':
        self.logHistWin = self.makeHelpWin('\n'.join(self._msgHistory),
                                           title=self._appName+' Event Log')
|
Set all the parameter entry values in the GUI to the values
in the given par list. If 'updateModel' is True, the internal
param list will be updated to the new values as well as the GUI
entries (slower and not always necessary). Note the
corresponding TparDisplay method.
def setAllEntriesFromParList(self, aParList, updateModel=False):
    """ Set all the parameter entry values in the GUI to the values
    in the given par list. If 'updateModel' is True, the internal
    param list will be updated to the new values as well as the GUI
    entries (slower and not always necessary). Note the
    corresponding TparDisplay method.

    Parameters
    ----------
    aParList : list or par-list object
        Either a plain list of par objects (matched by name and scope)
        or an object exposing getValue(name, native=1, prompt=0).
    updateModel : bool
        When True, also push the new values through
        checkSetSaveEntries so the model matches the GUI.

    Raises
    ------
    UnfoundParamError
        If a par expected by this GUI is missing from aParList.
    """
    # Get model data, the list of pars
    theParamList = self._taskParsObj.getParList() # we may modify members
    if len(aParList) != len(theParamList):
        # Warn but continue: extra/missing pars are handled per-par below.
        showwarning(message="Attempting to set parameter values from a "+ \
                    "list of different length ("+str(len(aParList))+ \
                    ") than the number shown here ("+ \
                    str(len(theParamList))+"). Be aware.",
                    title="Parameter List Length Mismatch")
    # LOOP THRU GUI PAR LIST
    for i in range(self.numParams):
        par = theParamList[i]
        if par.type == "pset":
            continue # skip PSET's for now
        gui_entry = self.entryNo[i]
        # Set the value in the paramList before setting it in the GUI
        # This may be in the form of a list, or an IrafParList (getValue)
        if isinstance(aParList, list):
            # Since "aParList" can have them in different order and number
            # than we do, we'll have to first find the matching param.
            found = False
            for newpar in aParList:
                if newpar.name==par.name and newpar.scope==par.scope:
                    par.set(newpar.value) # same as .get(native=1,prompt=0)
                    found = True
                    break
            # Now see if newpar was found in our list
            if not found:
                pnm = par.name
                if len(par.scope): pnm = par.scope+'.'+par.name
                raise UnfoundParamError('Error - Unfound Parameter! \n\n'+\
                      'Expected parameter "'+pnm+'" for task "'+ \
                      self.taskName+'". \nThere may be others...')
        else: # assume has getValue()
            par.set(aParList.getValue(par.name, native=1, prompt=0))
        # gui holds a str, but par.value is native; conversion occurs
        gui_entry.forceValue(par.value, noteEdited=False) # no triggers yet
    if updateModel:
        # Update the model values via checkSetSaveEntries
        self.badEntriesList = self.checkSetSaveEntries(doSave=False)
        # If there were invalid entries, prepare the message dialog
        if self.badEntriesList:
            self.processBadEntries(self.badEntriesList,
                                   self.taskName, canCancel=False)
|
Return current par value from the GUI. This does not do any
validation, and it is not necessarily the same value saved in the
model, which is always behind the GUI setting, in time. This is NOT
to be used to get all the values - it would not be efficient.
def getValue(self, name, scope=None, native=False):
    """ Return the current value of a parameter as shown in the GUI.

    No validation is performed, and the result is not necessarily the
    value saved in the model - the model always lags the GUI setting
    in time.  NOT meant for fetching all values; that would be
    inefficient.  If scope is given it must match, otherwise the first
    name-match is returned.  Raises RuntimeError if no par matches.
    """
    pars = self._taskParsObj.getParList()
    fullName = basicpar.makeFullName(scope, name)
    for idx in range(self.numParams):
        par = pars[idx]            # IrafPar or subclass
        entry = self.entryNo[idx]  # EparOption or subclass
        matched = par.fullName() == fullName or \
                  (scope is None and par.name == name)
        if not matched:
            continue
        raw = entry.choice.get()
        return entry.convertToNative(raw) if native else raw
    # We didn't find the requested par
    raise RuntimeError('Could not find par: "'+fullName+'"')
|
Check, then set, then save the parameter settings for
all child (pset) windows.
Prompts if any problems are found. Returns None
on success, list of bad entries on failure.
def checkSetSaveChildren(self, doSave=True):
    """Check, then set, then save the parameter settings for
    all child (pset) windows.
    Prompts if any problems are found. Returns None
    on success, list of bad entries on failure.

    Parameters
    ----------
    doSave : bool
        Passed through to each child's checkSetSaveEntries.
    """
    # Child windows have no children of their own to process.
    if self.isChild:
        return
    # Need to get all the entries and verify them.
    # Save the children in backwards order to coincide with the
    # display of the dialogs (LIFO)
    for n in range (len(self.top.childList)-1, -1, -1):
        self.badEntriesList = self.top.childList[n]. \
                              checkSetSaveEntries(doSave=doSave)
        if self.badEntriesList:
            # Ask the user whether to proceed despite the bad entries.
            ansOKCANCEL = self.processBadEntries(self.badEntriesList,
                          self.top.childList[n].taskName)
            if not ansOKCANCEL:
                return self.badEntriesList
        # If there were no invalid entries or the user says OK,
        # close down the child and increment to the next child
        self.top.childList[n].top.focus_set()
        self.top.childList[n].top.withdraw()
        del self.top.childList[n]
    # all windows saved successfully
    return
|
Internal callback used to make sure the msg list keeps moving.
def _pushMessages(self):
""" Internal callback used to make sure the msg list keeps moving. """
# This continues to get itself called until no msgs are left in list.
self.showStatus('')
if len(self._statusMsgsToShow) > 0:
self.top.after(200, self._pushMessages)
|
Show the given status string, but not until any given delay from
the previous message has expired. keep is a time (secs) to force
the message to remain without being overwritten or cleared. cat
is a string category used only in the historical log.
def showStatus(self, msg, keep=0, cat=None):
    """ Show the given status string, but not until any given delay from
    the previous message has expired. keep is a time (secs) to force
    the message to remain without being overwritten or cleared. cat
    is a string category used only in the historical log.

    Parameters
    ----------
    msg : str
        Message text; an empty string clears the status (or lets a
        queued important message through).
    keep : int or float
        Seconds this message must stay visible before being replaced.
    cat : str, optional
        Category label; DBG-category messages are logged but not shown.
    """
    # prep it, space-wise
    msg = msg.strip()
    if len(msg) > 0:
        # right here is the ideal place to collect a history of messages
        forhist = msg
        if cat: forhist = '['+cat+'] '+msg
        forhist = time.strftime("%a %H:%M:%S")+': '+forhist
        self._msgHistory.append(forhist)
        # now set the spacing
        msg = ' '+msg
    # stop here if it is a category not shown in the GUI
    if cat == DBG:
        return
    # see if we can show it
    now = time.time()
    if now >= self._leaveStatusMsgUntil: # we are clear, can show a msg
        # first see if this msg is '' - if so we will show an important
        # waiting msg instead of the '', and then pop it off our list
        if len(msg) < 1 and len(self._statusMsgsToShow) > 0:
            msg, keep = self._statusMsgsToShow[0] # overwrite both args
            del self._statusMsgsToShow[0]
        # now actually print the status out to the status widget
        self.top.status.config(text = msg)
        # reset our delay flag; a nonzero keep defers later messages
        self._leaveStatusMsgUntil = 0
        if keep > 0:
            self._leaveStatusMsgUntil = now + keep
    else:
        # there is a previous message still up, is this one important?
        if len(msg) > 0 and keep > 0:
            # Uh-oh, this is an important message that we don't want to
            # simply skip, but on the other hand we can't show it yet...
            # So we add it to _statusMsgsToShow and show it later (asap)
            # The queue is capped at 7 entries; duplicates are dropped.
            if (msg,keep) not in self._statusMsgsToShow:
                if len(self._statusMsgsToShow) < 7:
                    self._statusMsgsToShow.append( (msg,keep) ) # tuple
                    # kick off timer loop to get this one pushed through
                    if len(self._statusMsgsToShow) == 1:
                        self._pushMessages()
                else:
                    # should never happen, but just in case
                    print("Lost message!: "+msg+" (too far behind...)")
|
Start the GUI session, or simply load a task's ConfigObj.
def teal(theTask, parent=None, loadOnly=False, returnAs="dict",
         canExecute=True, strict=False, errorsToTerm=False,
         autoClose=True, defaults=False):
#        overrides=None):
    """ Start the GUI session, or simply load a task's ConfigObj.

    Parameters
    ----------
    theTask : str or object
        Task name, .cfg file name, or parameter object to load/edit.
    parent : optional
        Parent Tk widget for the dialog.
    loadOnly : bool
        When True, skip the GUI entirely and just return the loaded
        object (forces returnAs="dict" semantics).
    returnAs : {"dict", "status", None}
        What to return after a GUI session.
    canExecute, strict, errorsToTerm, autoClose, defaults : bool
        Behavior flags passed through to the loader/dialog.

    Returns
    -------
    The loaded parameter object, a status int (-1 canceled, 1 executed,
    0 otherwise), or None - depending on loadOnly/returnAs.
    """
    if loadOnly: # this forces returnAs="dict"
        obj = None
        try:
            obj = cfgpars.getObjectFromTaskArg(theTask, strict, defaults)
#           obj.strictUpdate(overrides) # ! would need to re-verify after this !
        except Exception as err: # catches RuntimeError and KeyError and ...
            # Since we are loadOnly, don't pop up the GUI for this
            if strict:
                raise
            else:
                # BUGFIX: Exception.message does not exist in Python 3;
                # use str(err) for the text of the exception.
                print(str(err).replace('\n\n','\n'))
        return obj
    else:
        assert returnAs in ("dict", "status", None), \
               "Invalid value for returnAs arg: "+str(returnAs)
        dlg = None
        try:
            # if setting to all defaults, go ahead and load it here, pre-GUI
            if defaults:
                theTask = cfgpars.getObjectFromTaskArg(theTask, strict, True)
            # now create/run the dialog
            dlg = ConfigObjEparDialog(theTask, parent=parent,
                                      autoClose=autoClose,
                                      strict=strict,
                                      canExecute=canExecute)
#                                     overrides=overrides)
        except cfgpars.NoCfgFileError as ncf:
            log_last_error()
            if errorsToTerm:
                print(str(ncf).replace('\n\n','\n'))
            else:
                popUpErr(parent=parent,message=str(ncf),title="Unfound Task")
        except Exception as err: # catches RuntimeError and KeyError and ...
            log_last_error()
            # BUGFIX (as above): str(err), not the removed .message attr
            if errorsToTerm:
                print(str(err).replace('\n\n','\n'))
            else:
                popUpErr(parent=parent, message=str(err),
                         title="Bad Parameters")
        # Return, depending on the mode in which we are operating
        if returnAs is None:
            return
        if returnAs == "dict":
            if dlg is None or dlg.canceled():
                return None
            else:
                return dlg.getTaskParsObj()
        # else, returnAs == "status"
        if dlg is None or dlg.canceled():
            return -1
        if dlg.executed():
            return 1
        return 0
|
Shortcut to load TEAL .cfg files for non-GUI access where
loadOnly=True.
def load(theTask, canExecute=True, strict=True, defaults=False):
    """ Convenience wrapper around teal() for non-GUI access to TEAL
    .cfg files (loadOnly=True); errors go to the terminal. """
    return teal(theTask,
                parent=None,
                loadOnly=True,
                returnAs="dict",
                canExecute=canExecute,
                strict=strict,
                errorsToTerm=True,
                defaults=defaults)
|
Find the task named taskPkgName, and delete any/all user-owned
.cfg files in the user's resource directory which apply to that task.
Like a unix utility, this returns 0 on success (no files found or only
1 found but deleted). For multiple files found, this uses deleteAll,
returning the file-name-list if deleteAll is False (to indicate the
problem) and without deleting any files. MUST check return value.
This does not prompt the user or print to the screen.
def unlearn(taskPkgName, deleteAll=False):
    """ Delete any/all user-owned .cfg files for the named task.

    Searches the user's resource directory for .cfg files that apply
    to taskPkgName.  Like a unix utility, returns 0 on success (none
    found, or exactly one found and deleted).  When multiple files are
    found: with deleteAll=True all are removed (returns 0); otherwise
    nothing is deleted and the list of file names is returned so the
    caller can act - MUST check the return value.  Never prompts or
    prints.  Raises when taskPkgName itself cannot be found.
    """
    # this WILL throw an exception if the taskPkgName isn't found
    matches = cfgpars.getUsrCfgFilesForPyPkg(taskPkgName)  # can raise
    if not matches:
        return 0
    if len(matches) == 1:
        os.remove(matches[0])
        return 0
    # at this point, we know more than one matching file was found
    if not deleteAll:
        return matches  # let the caller decide what to do
    for fname in matches:
        os.remove(fname)
    return 0
|
Load the given file (or existing object), and return a dict
of its values which are different from the default values. If report
is set, print to stdout the differences.
def diffFromDefaults(theTask, report=False):
    """ Load the given file (or existing object) and return a dict of
    the values that differ from the default values.  With report=True,
    also print the differences (and their defaults) to stdout. """
    # load both trees (dicts of dicts): all-defaults vs. current
    defaultTree = load(theTask, canExecute=False, strict=True, defaults=True)
    thisTree = load(theTask, canExecute=False, strict=True, defaults=False)
    # they must be flattenable
    defaultFlat = cfgpars.flattenDictTree(defaultTree)
    thisFlat = cfgpars.flattenDictTree(thisTree)
    # set-difference of the items gives the changed (key, value) pairs
    # thanks to: http://stackoverflow.com/questions/715234
    diffFlat = dict(set(thisFlat.items()) - set(defaultFlat.items()))
    if report:
        defaults_of_diffs_only = {k: defaultFlat[k] for k in diffFlat}
        print('Non-default values of "' + str(theTask) + '":\n' +
              _flat2str(diffFlat) +
              '\n\nDefault values:\n' +
              _flat2str(defaults_of_diffs_only))
    return diffFlat
|
Return True if the given file name is located in an
installed area (versus a user-owned file)
def _isInstalled(fullFname):
""" Return True if the given file name is located in an
installed area (versus a user-owned file) """
if not fullFname: return False
if not os.path.exists(fullFname): return False
instAreas = []
try:
import site
instAreas = site.getsitepackages()
except:
pass # python 2.6 and lower don't have site.getsitepackages()
if len(instAreas) < 1:
instAreas = [ os.path.dirname(os.__file__) ]
for ia in instAreas:
if fullFname.find(ia) >= 0:
return True
return False
|
.cfgspc embedded code execution is done here, in a relatively confined
space. The variables available to the code to be executed are:
SCOPE, NAME, VAL, PARENT, TEAL
The code string itself is expected to set a var named OUT
def execEmbCode(SCOPE, NAME, VAL, TEAL, codeStr):
    """ .cfgspc embedded code execution is done here, in a relatively confined
    space. The variables available to the code to be executed are:
          SCOPE, NAME, VAL, PARENT, TEAL
    The code string itself is expected to set a var named OUT, whose
    value is returned.

    NOTE(review): exec of config-supplied code is a code-execution
    risk; .cfgspc files appear to be trusted/installed content here -
    confirm they are never accepted from untrusted sources.
    """
    # This was all we needed in Python 2.x
    # OUT = None
    # exec codeStr
    # return OUT
    # In Python 3 (& 2.x) be more explicit: http://bugs.python.org/issue4831
    # PARENT gives the embedded code access to the GUI's top widget.
    PARENT = None
    if TEAL:
        PARENT = TEAL.top
    OUT = None
    # Capture locals() so the exec'd code's assignment to OUT is
    # visible to us afterwards (exec cannot rebind true locals in py3).
    ldict = locals() # will have OUT in it
    exec(codeStr, globals(), ldict)
    return ldict['OUT']
|
Print a message listing TEAL-enabled tasks available under a
given installation directory (where pkgName resides).
If always is True, this will always print when tasks are
found; otherwise it will only print found tasks when in interactive
mode.
The parameter 'hidden' supports a list of input tasknames that should
not be reported even though they still exist.
def print_tasknames(pkgName, aDir, term_width=80, always=False,
                    hidden=None):
    """ Print a message listing the TEAL-enabled tasks available under
    the given installation directory (where pkgName resides).

    When always is True the listing prints whenever tasks are found;
    otherwise only in interactive mode.  Tasknames listed in 'hidden'
    are suppressed from the report even though they exist.
    """
    # Bail out early when non-interactive, unless forced.  The sys.ps1
    # check cannot be used under PyRAF since it changes sys.
    if not always and 'pyraf' not in sys.modules:
        # sys.ps1 is only defined in interactive mode
        if not hasattr(sys, 'ps1'):
            return  # leave here, we're in someone's script
    # Check for tasks
    taskDict = cfgpars.findAllCfgTasksUnderDir(aDir)
    tasks = [t for t in taskDict.values() if len(t) > 0]
    if hidden:  # could even account for a single taskname as input here if needed
        for name in hidden:
            if name in tasks:
                tasks.remove(name)
    # only be verbose if there something found
    if not tasks:
        return
    sortedUniqTasks = sorted(set(tasks))
    noun = 'task' if len(sortedUniqTasks) == 1 else 'tasks'
    listing = ('The following ' + noun + ' in the ' + pkgName +
               ' package can be run with TEAL:\n')
    listing += printColsAuto(sortedUniqTasks, term_width=term_width,
                             min_pad=2)
    print(listing)
|
This function will return useful help as a string read from a file
in the task's installed directory called "<module>.help".
If no such file can be found, it will simply return an empty string.
Notes
-----
The location of the actual help file will be found under the task's
installed directory using 'irafutils.rglob' to search all sub-dirs to
find the file. This allows the help file to be either in the tasks
installed directory or in any sub-directory, such as a "help/" directory.
Parameters
----------
taskname: string
Value of `__taskname__` for a module/task
taskpath: string
Value of `__file__` for an installed module which defines the task
Returns
-------
helpString: string
multi-line string read from the file '<taskname>.help'
def getHelpFileAsString(taskname, taskpath):
    """
    This function will return useful help as a string read from a file
    in the task's installed directory called "<module>.help".
    If no such file can be found, it will simply return an empty string.
    Notes
    -----
    The location of the actual help file will be found under the task's
    installed directory using 'irafutils.rglob' to search all sub-dirs to
    find the file. This allows the help file to be either in the tasks
    installed directory or in any sub-directory, such as a "help/" directory.
    Parameters
    ----------
    taskname: string
        Value of `__taskname__` for a module/task
    taskpath: string
        Value of `__file__` for an installed module which defines the task
    Returns
    -------
    helpString: string
        multi-line string read from the file '<taskname>.help'
    """
    # get the local library directory where the code is stored
    pathsplit = os.path.split(taskpath)  # taskpath should be task's __file__
    if taskname.find('.') > -1:  # if taskname is given as package.taskname...
        helpname = taskname.split(".")[1]  # use the bare task name
    else:
        helpname = taskname
    localdir = pathsplit[0]
    if localdir == '':
        localdir = '.'
    # BUGFIX: rglob may find nothing; the old code indexed [0] blindly
    # and raised IndexError instead of returning '' as documented.
    matches = rglob(localdir, helpname + ".help")
    if not matches:
        return ''
    helpfile = matches[0]
    if not os.access(helpfile, os.R_OK):
        return ''
    # read the whole file at once instead of a line-append loop;
    # the context manager guarantees the handle is closed
    with open(helpfile, 'r') as fh:
        return fh.read()
|
Get a stringified val from a ConfigObj obj and return it as bool
def cfgGetBool(theObj, name, dflt):
    """ Fetch a stringified value from a ConfigObj mapping and interpret it
    as a boolean; return dflt when the key is absent. """
    raw = theObj.get(name, None)
    # only the (case-insensitive, whitespace-tolerant) word 'true' is truthy
    return dflt if raw is None else raw.strip().lower() == 'true'
|
Override so that we can run in a different mode.
def _overrideMasterSettings(self):
    """ Override so that we can run in a different mode.

    Applies the user's persisted GUI settings (rc file, via
    _getGuiSettings) plus TEAL-specific defaults: app identity, button
    behavior, colors, the Execute button, and a sniff of the task help
    text to see whether it is HTML or a URL.
    """
    # config-obj dict of defaults
    cod = self._getGuiSettings()
    # our own GUI setup
    self._appName = APP_NAME
    self._appHelpString = tealHelpString
    self._useSimpleAutoClose = self._do_usac
    self._showExtraHelpButton = False
    # per-user boolean prefs; the writer side is _saveGuiSettings
    self._saveAndCloseOnExec = cfgGetBool(cod, 'saveAndCloseOnExec', True)
    self._showHelpInBrowser = cfgGetBool(cod, 'showHelpInBrowser', False)
    self._writeProtectOnSaveAs = cfgGetBool(cod, 'writeProtectOnSaveAsOpt', True)
    self._flagNonDefaultVals = cfgGetBool(cod, 'flagNonDefaultVals', None)
    self._optFile = APP_NAME.lower()+".optionDB"
    # our own colors
    # prmdrss teal: #00ffaa, pure cyan (teal) #00ffff (darker) #008080
    # "#aaaaee" is a darker but good blue, but "#bbbbff" pops
    ltblu = "#ccccff" # light blue
    drktl = "#008888" # darkish teal
    self._frmeColor = cod.get('frameColor', drktl)
    self._taskColor = cod.get('taskBoxColor', ltblu)
    self._bboxColor = cod.get('buttonBoxColor', ltblu)
    self._entsColor = cod.get('entriesColor', ltblu)
    self._flagColor = cod.get('flaggedColor', 'brown')
    # double check _canExecute, but only if it is still set to the default
    if self._canExecute and self._taskParsObj: # default _canExecute=True
        self._canExecute = self._taskParsObj.canExecute()
    self._showExecuteButton = self._canExecute
    # check on the help string - just to see if it is HTML
    # (could use HTMLParser here if need be, be quick and simple tho)
    hhh = self.getHelpString(self.pkgName+'.'+self.taskName)
    if hhh:
        hhh = hhh.lower()
        # treat as HTML when: markup present, a web URL, or a file: URL
        # that ends in .htm/.html
        if hhh.find('<html') >= 0 or hhh.find('</html>') > 0:
            self._knowTaskHelpIsHtml = True
        elif hhh.startswith('http:') or hhh.startswith('https:'):
            self._knowTaskHelpIsHtml = True
        elif hhh.startswith('file:') and \
             (hhh.endswith('.htm') or hhh.endswith('.html')):
            self._knowTaskHelpIsHtml = True
|
Override this so we can handle case of file not writable, as
well as to make our _lastSavedState copy.
def _doActualSave(self, fname, comment, set_ro=False, overwriteRO=False):
    """ Override this so we can handle case of file not writable, as
    well as to make our _lastSavedState copy.

    Parameters
    ----------
    fname : str or None
        target file; falls back to the task's current file when empty
    comment : str
        comment passed through to saveParList
    set_ro : bool
        if True, write-protect the saved file (unless it is in the rc dir)
    overwriteRO : bool
        if True, allow a save-as to overwrite an existing read-only file

    Returns the value returned by saveParList.
    """
    self.debug('Saving, file name given: '+str(fname)+', set_ro: '+\
               str(set_ro)+', overwriteRO: '+str(overwriteRO))
    cantWrite = False
    inInstArea = False
    if fname in (None, ''): fname = self._taskParsObj.getFilename()
    # now do some final checks then save
    try:
        if _isInstalled(fname): # check: may be installed but not read-only
            inInstArea = cantWrite = True
        else:
            # in case of save-as, allow overwrite of read-only file
            if overwriteRO and os.path.exists(fname):
                setWritePrivs(fname, True, True) # try make writable
            # do the save
            rv=self._taskParsObj.saveParList(filename=fname,comment=comment)
    except IOError:
        # no write privileges; fall through to save a local copy instead
        cantWrite = True
    # User does not have privs to write to this file. Get name of local
    # choice and try to use that.
    if cantWrite:
        fname = self._taskParsObj.getDefaultSaveFilename()
        # Tell them the context is changing, and where we are saving
        msg = 'Read-only config file for task "'
        if inInstArea:
            msg = 'Installed config file for task "'
        msg += self._taskParsObj.getName()+'" is not to be overwritten.'+\
               ' Values will be saved to: \n\n\t"'+fname+'".'
        showwarning(message=msg, title="Will not overwrite!")
        # Try saving to their local copy
        rv=self._taskParsObj.saveParList(filename=fname, comment=comment)
    # Treat like a save-as (update title for ALL save ops)
    self._saveAsPostSave_Hook(fname)
    # Limit write privs if requested (only if not in the rc dir)
    if set_ro and os.path.dirname(os.path.abspath(fname)) != \
                  os.path.abspath(self._rcDir):
        cfgpars.checkSetReadOnly(fname)
    # Before returning, make a copy so we know what was last saved.
    # The dict() method returns a deep-copy dict of the keyvals.
    self._lastSavedState = self._taskParsObj.dict()
    return rv
|
Determine if there are any edits in the GUI that have not yet been
saved (e.g. to a file).
def hasUnsavedChanges(self):
    """ Determine if there are any edits in the GUI that have not yet been
    saved (e.g. to a file). """
    # Sanity check - this case shouldn't occur
    assert self._lastSavedState is not None, \
        "BUG: Please report this as it should never occur."
    # Push the current GUI values into the in-memory model without writing
    # anything to file and without altering bad values shown in the GUI.
    # This may leave the GUI half-saved, which is fine since we are about
    # to exit; prompting should only happen if the user chooses to save.
    problems = self.checkSetSaveEntries(doSave=False, fleeOnBadVals=True,
                                        allowGuiChanges=False)
    # Any bad entry counts as an unsaved change; otherwise compare the
    # current data to the last saved snapshot.  Keep the dict on the LHS
    # so that plain dict comparison is the one invoked.
    return True if problems else (self._lastSavedState != self._taskParsObj)
|
Override to allow us to use an edited callback.
def _defineEditedCallbackObjectFor(self, parScope, parName):
""" Override to allow us to use an edited callback. """
# We know that the _taskParsObj is a ConfigObjPars
triggerStrs = self._taskParsObj.getTriggerStrings(parScope, parName)
# Some items will have a trigger, but likely most won't
if triggerStrs and len(triggerStrs) > 0:
return self
else:
return None
|
Override so we can append read-only status.
def updateTitle(self, atitle):
    """ Override so we can append read-only status. """
    # Compute a status suffix only when the title names an existing file.
    suffix = ''
    if atitle and os.path.exists(atitle):
        if _isInstalled(atitle):
            suffix = ' [installed]'
        elif not os.access(atitle, os.W_OK):
            suffix = ' [read only]'
    if suffix:
        atitle += suffix
    super(ConfigObjEparDialog, self).updateTitle(atitle)
|
This is the callback function invoked when an item is edited.
This is only called for those items which were previously
specified to use this mechanism. We do not turn this on for
all items because the performance might be prohibitive.
This kicks off any previously registered triggers.
def edited(self, scope, name, lastSavedVal, newVal, action):
    """ This is the callback function invoked when an item is edited.
    This is only called for those items which were previously
    specified to use this mechanism. We do not turn this on for
    all items because the performance might be prohibitive.
    This kicks off any previously registered triggers.

    Parameters
    ----------
    scope : str
        scope (section) of the edited parameter
    name : str
        name of the edited parameter
    lastSavedVal : object
        last saved value (part of the callback signature; unused here)
    newVal : object
        the value just entered/selected in the GUI
    action : str
        what caused the edit; one of editpar.GROUP_ACTIONS

    Raises
    ------
    RuntimeError
        when a trigger name is neither a canned one nor found in the
        _RULES_ section of the .cfgspc file
    """
    # Get name(s) of any triggers that this par triggers
    triggerNamesTup = self._taskParsObj.getTriggerStrings(scope, name)
    assert triggerNamesTup is not None and len(triggerNamesTup) > 0, \
        'Empty trigger name for: "'+name+'", consult the .cfgspc file.'
    # Loop through all trigger names - each one is a trigger to kick off -
    # in the order that they appear in the tuple we got.  Most cases will
    # probably only have a single trigger in the tuple.
    for triggerName in triggerNamesTup:
        # First handle the known/canned trigger names
        # print (scope, name, newVal, action, triggerName) # DBG: debug line
        # _section_switch_
        if triggerName == '_section_switch_':
            # Try to uniformly handle all possible par types here, not
            # just boolean (e.g. str, int, float, etc.)
            # Also, see logic in _BooleanMixin._coerceOneValue()
            state = newVal not in self.FALSEVALS
            self._toggleSectionActiveState(scope, state, (name,))
            continue
        # _2_section_switch_ (see notes above in _section_switch_)
        if triggerName == '_2_section_switch_':
            state = newVal not in self.FALSEVALS
            # toggle most of 1st section (as usual) and ALL of next section
            self._toggleSectionActiveState(scope, state, (name,))
            # get first par of next section (fpons) - is a tuple
            fpons = self.findNextSection(scope, name)
            nextSectScope = fpons[0]
            if nextSectScope:
                self._toggleSectionActiveState(nextSectScope, state, None)
            continue
        # Now handle rules with embedded code (eg. triggerName=='_rule1_')
        if '_RULES_' in self._taskParsObj and \
           triggerName in self._taskParsObj['_RULES_'].configspec:
            # Get codeStr to execute it, but before we do so, check 'when' -
            # make sure this is an action that is allowed to cause a trigger
            ruleSig = self._taskParsObj['_RULES_'].configspec[triggerName]
            chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
            codeStr = chkArgsDict.get('code') # or None if didn't specify
            when2run = chkArgsDict.get('when') # or None if didn't specify
            greenlight = False # do we have a green light to eval the rule?
            if when2run is None:
                greenlight = True # means run rule for any possible action
            else: # 'when' was set to something so we need to check action
                # check value of action (poor man's enum)
                assert action in editpar.GROUP_ACTIONS, \
                    "Unknown action: "+str(action)+', expected one of: '+ \
                    str(editpar.GROUP_ACTIONS)
                # check value of 'when' (allow them to use comma-sep'd str)
                # (readers be aware that values must be those possible for
                # 'action', and 'always' is also allowed)
                whenlist = when2run.split(',')
                # warn for invalid values
                for w in whenlist:
                    if not w in editpar.GROUP_ACTIONS and w != 'always':
                        print('WARNING: skipping bad value for when kwd: "'+\
                              w+'" in trigger/rule: '+triggerName)
                # finally, do the correlation
                greenlight = 'always' in whenlist or action in whenlist
            # SECURITY NOTE: because this part executes arbitrary code, that
            # code string must always be found only in the configspec file,
            # which is intended to only ever be root-installed w/ the pkg.
            if codeStr:
                if not greenlight:
                    continue # not an error, just skip this one
                self.showStatus("Evaluating "+triggerName+' ...') #dont keep
                self.top.update_idletasks() #allow msg to draw prior to exec
                # execute it and retrieve the outcome
                try:
                    outval = execEmbCode(scope, name, newVal, self, codeStr)
                except Exception as ex:
                    # the error string doubles as the "outcome" value here
                    outval = 'ERROR in '+triggerName+': '+str(ex)
                    print(outval)
                    msg = outval+':\n'+('-'*99)+'\n'+traceback.format_exc()
                    msg += 'CODE: '+codeStr+'\n'+'-'*99+'\n'
                    self.debug(msg)
                    self.showStatus(outval, keep=1)
                # Leave this debug line in until it annoys someone
                msg = 'Value of "'+name+'" triggered "'+triggerName+'"'
                stroutval = str(outval)
                if len(stroutval) < 30: msg += ' --> "'+stroutval+'"'
                self.showStatus(msg, keep=0)
                # Now that we have triggerName evaluated to outval, we need
                # to look through all the parameters and see if there are
                # any items to be affected by triggerName (e.g. '_rule1_')
                self._applyTriggerValue(triggerName, outval)
                continue
        # If we get here, we have an unknown/unusable trigger
        raise RuntimeError('Unknown trigger for: "'+name+'", named: "'+ \
            str(triggerName)+'". Please consult the .cfgspc file.')
|
Starts with given par (scope+name) and looks further down the list
of parameters until one of a different non-null scope is found. Upon
success, returns the (scope, name) tuple, otherwise (None, None).
def findNextSection(self, scope, name):
    """ Starts with given par (scope+name) and looks further down the list
    of parameters until one of a different non-null scope is found. Upon
    success, returns the (scope, name) tuple, otherwise (None, None). """
    pars = self._taskParsObj.getParList()
    # locate the starting parameter's index
    startIdx = None
    for idx, par in enumerate(pars):
        if par.scope == scope and par.name == name:
            startIdx = idx
            break
    if startIdx is None:
        print('WARNING: could not find starting par: '+scope+'.'+name)
        return (None, None)
    # scan forward for the first par with a different, non-empty scope
    for par in pars[startIdx:]:
        if par.scope and par.scope != scope:
            return (par.scope, par.name)
    # no later section exists
    return (None, None)
|
Overridden version for ConfigObj. theTask can be either
a .cfg file name or a ConfigObjPars object.
def _setTaskParsObj(self, theTask):
    """ Overridden version for ConfigObj. theTask can be either
    a .cfg file name or a ConfigObjPars object. """
    # Build the ConfigObjPars object for this task/file.
    self._taskParsObj = cfgpars.getObjectFromTaskArg(
        theTask, self._strict, False)
    # Route its debug output through this dialog.
    self._taskParsObj.setDebugLogger(self)
    # Snapshot the pristine key/values right away; dict() deep-copies.
    self._lastSavedState = self._taskParsObj.dict()
|
Return a string to be used as the filter arg to the save file
dialog during Save-As.
def _getSaveAsFilter(self):
    """ Return a string to be used as the filter arg to the save file
    dialog during Save-As. """
    # Prefer the directory of the current file, unless it is the resource
    # dir or is not writeable - in those cases fall back to the cwd.
    rcAbs = os.path.abspath(self._rcDir)
    candidate = os.path.abspath(os.path.dirname(self._taskParsObj.filename))
    if candidate == rcAbs or not os.access(candidate, os.W_OK):
        candidate = os.path.abspath(os.path.curdir)
    # The app-specific env var, when set non-empty, overrides the choice.
    override = os.environ.get(APP_NAME.upper()+'_CFG', '')
    if override:
        return override+"/*.cfg"
    return candidate+'/*.cfg'
|
Go through all possible sites to find applicable .cfg files.
Return as an iterable.
def _getOpenChoices(self):
    """ Go through all possible sites to find applicable .cfg files.
    Return as an iterable. """
    tsk = self._taskParsObj.getName()
    found = set()
    searched = []  # dirs already globbed; skipping repeats speeds this up

    def _scan(aDir):
        # glob a directory for this task's .cfg files, at most once
        if aDir not in searched:
            searched.append(aDir)
            found.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))

    # dir of the currently-loaded file
    lastDir = os.path.dirname(self._taskParsObj.filename)
    if len(lastDir) < 1:
        lastDir = os.curdir
    _scan(lastDir)
    # current working dir
    _scan(os.getcwd())
    # task's python pkg dir (if tsk == python pkg name)
    try:
        x, pkgf = cfgpars.findCfgFileForPkg(tsk, '.cfg', taskName=tsk,
                                            pkgObj=self._taskParsObj.getAssocPkg())
        found.add(pkgf)
    except cfgpars.NoCfgFileError:
        pass # no big deal - maybe there is no python package
    # user's own resourceDir
    _scan(self._rcDir)
    # extra loc - see if they used the app's env. var
    envVarName = APP_NAME.upper()+'_CFG'
    if envVarName in os.environ:
        _scan(os.environ[envVarName])
    # At the very end, add an option which we will later interpret to mean
    # to open the file dialog.
    choices = sorted(found)
    choices.append("Other ...")
    return choices
|
Load the parameter settings from a user-specified file.
def pfopen(self, event=None):
    """ Load the parameter settings from a user-specified file.

    Prompts for a file when the menu choice ends in "...", validates that
    the chosen file belongs to this task, then loads its values into the
    GUI and makes that file the current context.
    """
    # Get the selected file name
    fname = self._openMenuChoice.get()
    # Also allow them to simply find any file - do not check _task_name_...
    # (could use tkinter's FileDialog, but this one is prettier)
    if fname[-3:] == '...':
        if capable.OF_TKFD_IN_EPAR:
            fname = askopenfilename(title="Load Config File",
                                    parent=self.top)
        else:
            from . import filedlg
            fd = filedlg.PersistLoadFileDialog(self.top,
                                               "Load Config File",
                                               self._getSaveAsFilter())
            if fd.Show() != 1:
                fd.DialogCleanup()
                return
            fname = fd.GetFileName()
            fd.DialogCleanup()
        if not fname: return # canceled
    self.debug('Loading from: '+fname)
    # load it into a tmp object (use associatedPkg if we have one)
    try:
        tmpObj = cfgpars.ConfigObjPars(fname, associatedPkg=\
                                       self._taskParsObj.getAssocPkg(),
                                       strict=self._strict)
    except Exception as ex:
        # BUG FIX: was ex.message, which no longer exists in Python 3
        showerror(message=str(ex), title='Error in '+os.path.basename(fname))
        self.debug('Error in '+os.path.basename(fname))
        self.debug(traceback.format_exc())
        return
    # check it to make sure it is a match
    if not self._taskParsObj.isSameTaskAs(tmpObj):
        msg = 'The current task is "'+self._taskParsObj.getName()+ \
              '", but the selected file is for task "'+ \
              str(tmpObj.getName())+'". This file was not loaded.'
        showerror(message=msg, title="Error in "+os.path.basename(fname))
        self.debug(msg)
        self.debug(traceback.format_exc())
        return
    # Set the GUI entries to these values (let the user Save after)
    newParList = tmpObj.getParList()
    try:
        self.setAllEntriesFromParList(newParList, updateModel=True)
        # go ahead and updateModel, even though it will take longer,
        # we need it updated for the copy of the dict we make below
    except editpar.UnfoundParamError as pe:
        showwarning(message=str(pe), title="Error in "+os.path.basename(fname))
    # trip any triggers
    self.checkAllTriggers('fopen')
    # This new fname is our current context
    self.updateTitle(fname)
    self._taskParsObj.filename = fname # !! maybe try setCurrentContext() ?
    self.freshenFocus()
    self.showStatus("Loaded values from: "+fname, keep=2)
    # Since we are in a new context (and have made no changes yet), make
    # a copy so we know what the last state was.
    # The dict() method returns a deep-copy dict of the keyvals.
    self._lastSavedState = self._taskParsObj.dict()
|
Override to include ConfigObj filename and specific errors.
Note that this only handles "missing" pars and "extra" pars, not
wrong-type pars. So it isn't that big of a deal.
def _handleParListMismatch(self, probStr, extra=False):
""" Override to include ConfigObj filename and specific errors.
Note that this only handles "missing" pars and "extra" pars, not
wrong-type pars. So it isn't that big of a deal. """
# keep down the duplicate errors
if extra:
return True # the base class is already stating it will be ignored
# find the actual errors, and then add that to the generic message
errmsg = 'Warning: '
if self._strict:
errmsg = 'ERROR: '
errmsg = errmsg+'mismatch between default and current par lists ' + \
'for task "'+self.taskName+'".'
if probStr:
errmsg += '\n\t'+probStr
errmsg += '\nTry editing/deleting: "' + \
self._taskParsObj.filename+'" (or, if in PyRAF: "unlearn ' + \
self.taskName+'").'
print(errmsg)
return True
|
Load the default parameter settings into the GUI.
def _setToDefaults(self):
    """ Load the default parameter settings into the GUI. """
    # Create an empty object, where every item is set to its default value
    try:
        tmpObj = cfgpars.ConfigObjPars(self._taskParsObj.filename,
                                       associatedPkg=\
                                       self._taskParsObj.getAssocPkg(),
                                       setAllToDefaults=self.taskName,
                                       strict=False)
    except Exception as ex:
        msg = "Error Determining Defaults"
        # BUG FIX: was ex.message, which no longer exists in Python 3
        showerror(message=msg+'\n\n'+str(ex),
                  title="Error Determining Defaults")
        return
    # Set the GUI entries to these values (let the user Save after)
    tmpObj.filename = self._taskParsObj.filename = '' # name it later
    newParList = tmpObj.getParList()
    try:
        self.setAllEntriesFromParList(newParList) # needn't updateModel yet
        self.checkAllTriggers('defaults')
        self.updateTitle('')
        self.showStatus("Loaded default "+self.taskName+" values via: "+ \
            os.path.basename(tmpObj._original_configspec), keep=1)
    except editpar.UnfoundParamError as pe:
        showerror(message=str(pe), title="Error Setting to Default Values")
|
Retrieve the current parameter settings from the GUI.
def getDict(self):
    """ Retrieve the current parameter settings from the GUI."""
    # Before handing back the dict, sync the in-memory model with what the
    # GUI currently shows; report (but do not cancel on) any bad entries.
    problems = self.checkSetSaveEntries(doSave=False)
    if problems:
        self.processBadEntries(problems, self.taskName, canCancel=False)
    return self._taskParsObj.dict()
|
Load the parameter settings from a given dict into the GUI.
def loadDict(self, theDict):
    """ Load the parameter settings from a given dict into the GUI.

    Parameters
    ----------
    theDict : dict
        key/value pairs to merge into the current task parameters
    """
    # We are going to have to merge this info into ourselves so let's
    # first make sure all of our models are up to date with the values in
    # the GUI right now.
    badList = self.checkSetSaveEntries(doSave=False)
    if badList:
        if not self.processBadEntries(badList, self.taskName):
            return
    # now, self._taskParsObj is up-to-date
    # So now we update _taskParsObj with the input dict
    cfgpars.mergeConfigObj(self._taskParsObj, theDict)
    # now sync the _taskParsObj dict with its par list model
    # '\n'.join([str(jj) for jj in self._taskParsObj.getParList()])
    self._taskParsObj.syncParamList(False)
    # Set the GUI entries to these values (let the user Save after)
    try:
        self.setAllEntriesFromParList(self._taskParsObj.getParList(),
                                      updateModel=True)
        self.checkAllTriggers('fopen')
        self.freshenFocus()
        self.showStatus('Loaded '+str(len(theDict))+ \
            ' user par values for: '+self.taskName, keep=1)
    except Exception as ex:
        # BUG FIX: was ex.message, which no longer exists in Python 3
        showerror(message=str(ex), title="Error Setting to Loaded Values")
|
Return a dict (ConfigObj) of all user settings found in rcFile.
def _getGuiSettings(self):
    """ Return a dict (ConfigObj) of all user settings found in rcFile. """
    # Put the settings into a ConfigObj dict (don't use a config-spec)
    rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg'
    if not os.path.exists(rcFile):
        return {}
    try:
        return configobj.ConfigObj(rcFile)
        # tho, for simple types, unrepr=True eliminates need for .cfgspc
        # also, if we turn unrepr on, we don't need cfgGetBool
    except Exception as ex:
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit and hid the parse error's cause
        raise RuntimeError('Error parsing: ' +
                           os.path.realpath(rcFile)) from ex
|
The base class doesn't implement this, so we will - save settings
(only GUI stuff, not task related) to a file.
def _saveGuiSettings(self):
    """ The base class doesn't implement this, so we will - save settings
    (only GUI stuff, not task related) to a file. """
    # Put the settings into a ConfigObj dict (don't use a config-spec)
    rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg'
    # start from a clean slate
    if os.path.exists(rcFile):
        os.remove(rcFile)
    co = configobj.ConfigObj(rcFile) # can skip try-block, won't read file
    # map each persisted option name to its current in-memory value;
    # tuple order fixes the key order written to the file
    settings = (
        ('showHelpInBrowser',       self._showHelpInBrowser),
        ('saveAndCloseOnExec',      self._saveAndCloseOnExec),
        ('writeProtectOnSaveAsOpt', self._writeProtectOnSaveAs),
        ('flagNonDefaultVals',      self._flagNonDefaultVals),
        ('frameColor',              self._frmeColor),
        ('taskBoxColor',            self._taskColor),
        ('buttonBoxColor',          self._bboxColor),
        ('entriesColor',            self._entsColor),
        ('flaggedColor',            self._flagColor),
    )
    for key, val in settings:
        co[key] = val
    co.initial_comment = [
        'Automatically generated by '+APP_NAME+
        '. All edits will eventually be overwritten.',
        'To use platform default colors, delete each color line below.']
    co.final_comment = [''] # ensure \n at EOF
    co.write()
|
Here we look through the entire .cfgspc to see if any parameters
are affected by this trigger. For those that are, we apply the action
to the GUI widget. The action is specified by depType.
def _applyTriggerValue(self, triggerName, outval):
    """ Here we look through the entire .cfgspc to see if any parameters
    are affected by this trigger. For those that are, we apply the action
    to the GUI widget. The action is specified by depType.

    Parameters
    ----------
    triggerName : str
        name of the trigger/rule just evaluated (e.g. '_rule1_')
    outval : object
        the trigger's result value, applied to each dependent widget

    Raises
    ------
    RuntimeError
        on an unknown dependency type, or when a dependency names a par
        that cannot be matched to any widget or section
    """
    # First find which items are dependent upon this trigger (cached)
    # e.g. { scope1.name1 : dep'cy-type, scope2.name2 : dep'cy-type, ... }
    depParsDict = self._taskParsObj.getParsWhoDependOn(triggerName)
    if not depParsDict: return
    if 0: print("Dependent parameters:\n"+str(depParsDict)+"\n")
    # Get model data, the list of pars
    theParamList = self._taskParsObj.getParList()
    # Then go through the dependent pars and apply the trigger to them
    settingMsg = ''
    for absName in depParsDict:
        used = False
        # For each dep par, loop to find the widget for that scope.name
        for i in range(self.numParams):
            scopedName = theParamList[i].scope+'.'+theParamList[i].name # diff from makeFullName!!
            if absName == scopedName: # a match was found
                depType = depParsDict[absName]
                if depType == 'active_if':
                    self.entryNo[i].setActiveState(outval)
                elif depType == 'inactive_if':
                    self.entryNo[i].setActiveState(not outval)
                elif depType == 'is_set_by':
                    self.entryNo[i].forceValue(outval, noteEdited=True)
                    # WARNING! noteEdited=True may start recursion!
                    if len(settingMsg) > 0: settingMsg += ", "
                    settingMsg += '"'+theParamList[i].name+'" to "'+\
                                  outval+'"'
                elif depType in ('set_yes_if', 'set_no_if'):
                    # truthy trigger output sets the bool par; falsy leaves
                    # it alone (only noted in the status message)
                    if bool(outval):
                        newval = 'yes'
                        if depType == 'set_no_if': newval = 'no'
                        self.entryNo[i].forceValue(newval, noteEdited=True)
                        # WARNING! noteEdited=True may start recursion!
                        if len(settingMsg) > 0: settingMsg += ", "
                        settingMsg += '"'+theParamList[i].name+'" to "'+\
                                      newval+'"'
                    else:
                        if len(settingMsg) > 0: settingMsg += ", "
                        settingMsg += '"'+theParamList[i].name+\
                                      '" (no change)'
                elif depType == 'is_disabled_by':
                    # this one is only used with boolean types
                    on = self.entryNo[i].convertToNative(outval)
                    if on:
                        # do not activate whole section or change
                        # any values, only activate this one
                        self.entryNo[i].setActiveState(True)
                    else:
                        # for off, set the bool par AND grey WHOLE section
                        self.entryNo[i].forceValue(outval, noteEdited=True)
                        self.entryNo[i].setActiveState(False)
                        # we'd need this if the par had no _section_switch_
                        # self._toggleSectionActiveState(
                        #                theParamList[i].scope, False, None)
                        if len(settingMsg) > 0: settingMsg += ", "
                        settingMsg += '"'+theParamList[i].name+'" to "'+\
                                      outval+'"'
                else:
                    raise RuntimeError('Unknown dependency: "'+depType+ \
                        '" for par: "'+scopedName+'"')
                used = True
                break
        # Or maybe it is a whole section
        if absName.endswith('._section_'):
            scope = absName[:-10]
            depType = depParsDict[absName]
            if depType == 'active_if':
                self._toggleSectionActiveState(scope, outval, None)
            elif depType == 'inactive_if':
                self._toggleSectionActiveState(scope, not outval, None)
            used = True
        # Help to debug the .cfgspc rules
        if not used:
            raise RuntimeError('UNUSED "'+triggerName+'" dependency: '+ \
                str({absName:depParsDict[absName]}))
    if len(settingMsg) > 0:
        # why ?! self.freshenFocus()
        self.showStatus('Automatically set '+settingMsg, keep=1)
|
Given a FITS filename representing an association table, reads in the table as a
dictionary which can be used by pydrizzle and multidrizzle.
An association table is a FITS binary table with 2 required columns: 'MEMNAME',
'MEMTYPE'. It checks 'MEMPRSNT' column and removes all files for which its value is 'no'.
Parameters
----------
fname : str
name of association table
output : str
name of output product - if not specified by the user,
the first PROD-DTH name is used if present,
if not, the first PROD-RPT name is used if present,
if not, the rootname of the input association table is used.
prodonly : bool
what files should be considered as input
if True - select only MEMTYPE=PROD* as input
if False - select only MEMTYPE=EXP as input
Returns
-------
asndict : dict
A dictionary-like object with all the association information.
Examples
--------
An association table can be read from a file using the following commands::
>>> from stsci.tools import asnutil
>>> asntab = asnutil.readASNTable('j8bt06010_shifts_asn.fits', prodonly=False) # doctest: +SKIP
The `asntab` object can now be passed to other code to provide relationships
between input and output images defined by the association table.
def readASNTable(fname, output=None, prodonly=False):
    """
    Given a FITS filename representing an association table, reads in the
    table as a dictionary which can be used by pydrizzle and multidrizzle.

    An association table is a FITS binary table with 2 required columns:
    'MEMNAME', 'MEMTYPE'. It checks the 'MEMPRSNT' column and removes all
    files for which its value is 'no'.

    Parameters
    ----------
    fname : str
        name of association table
    output : str
        name of output product - if not specified by the user,
        the first PROD-DTH name is used if present,
        if not, the first PROD-RPT name is used if present,
        if not, the rootname of the input association table is used.
    prodonly : bool
        what files should be considered as input
        if True - select only MEMTYPE=PROD* as input
        if False - select only MEMTYPE=EXP as input

    Returns
    -------
    asndict : dict
        A dictionary-like object with all the association information.

    Examples
    --------
    An association table can be read from a file using the following commands::

    >>> from stsci.tools import asnutil
    >>> asntab = asnutil.readASNTable('j8bt06010_shifts_asn.fits', prodonly=False) # doctest: +SKIP

    The `asntab` object can now be passed to other code to provide relationships
    between input and output images defined by the association table.
    """
    try:
        f = fits.open(fu.osfn(fname))
    except Exception:
        # narrowed from a bare "except:" so ^C/SystemExit pass through
        raise IOError("Can't open file %s\n" % fname)

    colnames = f[1].data.names
    try:
        colunits = f[1].data.units
    except AttributeError: pass

    hdr = f[0].header

    if 'MEMNAME' not in colnames or 'MEMTYPE' not in colnames:
        msg = 'Association table incomplete: required column(s) MEMNAME/MEMTYPE NOT found!'
        raise ValueError(msg)

    d = {}
    for n in colnames:
        d[n] = f[1].data.field(n)
    f.close()

    valid_input = d['MEMPRSNT'].copy()
    memtype = d['MEMTYPE'].copy()
    # element-wise chararray.find: index 0 means "starts with"
    prod_dth = (memtype.find('PROD-DTH') == 0).nonzero()[0]
    prod_rpt = (memtype.find('PROD-RPT') == 0).nonzero()[0]
    prod_crj = (memtype.find('PROD-CRJ') == 0).nonzero()[0]

    # set output name
    if output is None:
        if prod_dth:
            output = d['MEMNAME'][prod_dth[0]]
        elif prod_rpt:
            output = d['MEMNAME'][prod_rpt[0]]
        elif prod_crj:
            output = d['MEMNAME'][prod_crj[0]]
        else:
            output = fname.split('_')[0]

    # select which rows count as input (renamed from "input": builtin shadow)
    if prodonly:
        input_mask = d['MEMTYPE'].find('PROD') == 0
        if prod_dth:
            input_mask[prod_dth] = False
    else:
        input_mask = (d['MEMTYPE'].find('EXP') == 0)

    valid_input *= input_mask

    for k in d:
        d[k] = d[k][valid_input]

    infiles = list(d['MEMNAME'].lower())
    if not infiles:
        print("No valid input specified")
        return None

    if ('XOFFSET' in colnames and d['XOFFSET'].any()) or \
       ('YOFFSET' in colnames and d['YOFFSET'].any()):
        abshift = True
        dshift = False
        try:
            units = colunits[colnames.index('XOFFSET')]
        except Exception:
            units = 'pixels'
        xshifts = list(d['XOFFSET'])
        yshifts = list(d['YOFFSET'])
    elif ('XDELTA' in colnames and d['XDELTA'].any()) or \
         ('YDELTA' in colnames and d['YDELTA'].any()):
        abshift = False
        dshift = True
        try:
            units = colunits[colnames.index('XDELTA')]
        except Exception:
            units = 'pixels'
        xshifts = list(d['XDELTA'])
        yshifts = list(d['YDELTA'])
    else:
        abshift = False
        dshift = False

    members = {}

    if not abshift and not dshift:
        asndict = ASNTable(infiles, output=output)
        asndict.create()
        return asndict
    else:
        try:
            refimage = hdr['refimage']
        except KeyError: refimage = None
        try:
            frame = hdr['shframe']
        except KeyError: frame = 'input'
        # BUG FIX: rots/scales (and rot/scale in the loop below) were
        # previously unbound when the ROTATION/SCALE columns were absent,
        # raising NameError.  Default to identity transform values, which
        # match what ASNTable.write() emits for a new product row.
        rots = []
        scales = []
        if 'ROTATION' in colnames:
            rots = list(d['ROTATION'])
        if 'SCALE' in colnames:
            scales = list(d['SCALE'])

        for r in range(len(infiles)):
            row = r
            xshift = xshifts[r]
            yshift = yshifts[r]
            rot = rots[r] if rots else 0.0
            scale = scales[r] if scales else 1.0
            members[infiles[r]] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift,
                                            yshift=yshift, scale=scale, refimage=refimage, shift_frame=frame,
                                            shift_units=units)

        asndict = ASNTable(infiles, output=output)
        asndict.create()
        asndict['members'].update(members)
        return asndict
|
Write association table to a file.
def write(self, output=None):
    """
    Write the association table to a FITS file on disk.

    Parameters
    ----------
    output : str, optional
        Name of the output FITS file.  When omitted, the name is derived
        from ``self['output']`` as ``'<output>_asn.fits'``.
    """
    if not output:
        outfile = self['output']+'_asn.fits'
        output = self['output']
    else:
        outfile = output

    # Warn the user when an existing association table is about to be
    # replaced.  The actual replacement happens via overwrite/clobber below.
    if os.path.exists(outfile):
        warningmsg = "\n#########################################\n"
        warningmsg += "# #\n"
        warningmsg += "# WARNING: #\n"
        warningmsg += "# The existing association table, #\n"
        warningmsg += " " + str(outfile) + '\n'
        warningmsg += "# is being replaced. #\n"
        warningmsg += "# #\n"
        warningmsg += "#########################################\n\n"
        # BUG FIX: the message was constructed but never emitted.
        print(warningmsg)

    fasn = fits.HDUList()

    # Compute maximum length of MEMNAME for table column definition
    _maxlen = 0
    for _fname in self['order']:
        if len(_fname) > _maxlen: _maxlen = len(_fname)
    # Enforce a minimum size of 24
    if _maxlen < 24: _maxlen = 24
    namelen_str = str(_maxlen+2)+'A'

    self.buildPrimary(fasn, output=output)

    # Build the member columns: one EXP-DTH row per input member, plus a
    # final PROD-DTH row describing the product itself (zero shifts,
    # unit scale, not present).
    mname = self['order'][:]
    mname.append(output)
    mtype = ['EXP-DTH' for l in self['order']]
    mtype.append('PROD-DTH')
    mprsn = [True for l in self['order']]
    mprsn.append(False)
    xoff = [self['members'][l]['xoff'] for l in self['order']]
    xoff.append(0.0)
    yoff = [self['members'][l]['yoff'] for l in self['order']]
    yoff.append(0.0)
    xsh = [self['members'][l]['xshift'] for l in self['order']]
    xsh.append(0.0)
    ysh = [self['members'][l]['yshift'] for l in self['order']]
    ysh.append(0.0)
    rot = [self['members'][l]['rot'] for l in self['order']]
    rot.append(0.0)
    scl = [self['members'][l]['scale'] for l in self['order']]
    scl.append(1.0)

    memname = fits.Column(name='MEMNAME',format=namelen_str,array=N.char.array(mname))
    memtype = fits.Column(name='MEMTYPE',format='14A',array=N.char.array(mtype))
    # FITS logical column expects uint8 under the hood.
    memprsn = fits.Column(name='MEMPRSNT', format='L', array=N.array(mprsn).astype(N.uint8))
    xoffset = fits.Column(name='XOFFSET', format='E', array=N.array(xoff))
    yoffset = fits.Column(name='YOFFSET', format='E', array=N.array(yoff))
    xdelta = fits.Column(name='XDELTA', format='E', array=N.array(xsh))
    ydelta = fits.Column(name='YDELTA', format='E', array=N.array(ysh))
    rotation = fits.Column(name='ROTATION', format='E', array=N.array(rot))
    scale = fits.Column(name='SCALE', format='E', array=N.array(scl))

    cols = fits.ColDefs([memname,memtype,memprsn,xoffset,yoffset,xdelta,ydelta,rotation,scale])
    hdu = fits.BinTableHDU.from_columns(cols)
    fasn.append(hdu)
    # astropy renamed 'clobber' to 'overwrite' in 1.3.
    if ASTROPY_VER_GE13:
        fasn.writeto(outfile, overwrite=True)
    else:
        fasn.writeto(outfile, clobber=True)
    fasn.close()

    # If the first member carries a reference image, append its WCS as an
    # extra extension and record it in the primary header.
    mem0 = self['order'][0]
    refimg = self['members'][mem0]['refimage']
    if refimg is not None:
        whdu = wcsutil.WCSObject(refimg)
        whdu.createReferenceWCS(outfile,overwrite=False)
        ftab = fits.open(outfile)
        ftab['primary'].header['refimage'] = outfile+"[wcs]"
        ftab.close()
        del whdu
|
Reads a shift file from disk and populates a dictionary.
def readShiftFile(self, filename):
    """
    Read a shift file from disk and populate this (dict-like) object.

    Header lines start with '#' and look like '# key: value'; only the
    keys 'frame', 'refimage', 'form' and 'units' are kept.  Each data
    line is '<filename> <xshift> <yshift> [rot] [scale]'; missing
    rotation/scale are padded with their identity values (0.0, 1.0).

    Parameters
    ----------
    filename : str
        Path of the shift file to read.

    Raises
    ------
    ValueError
        If a shift value is not a float, or a row does not carry
        between 2 and 4 shift values.
    """
    with open(filename, 'r') as fshift:
        flines = fshift.readlines()

    # Parse the common (header) block.
    common = [f.strip('#').strip() for f in flines if f.startswith('#')]
    pairs = [line.split(': ') for line in common]
    # BUG FIX: the original removed items from the list while iterating
    # it, which silently skipped entries; filter with a comprehension.
    # This also drops plain line comments that are not header keywords.
    pairs = [p for p in pairs if p[0] in ['frame', 'refimage', 'form', 'units']]
    for pair in pairs:
        pair[1] = pair[1].strip()
    self.update(pairs)

    # Parse the data rows (everything that is not a comment or blank).
    files = [f.strip().split(' ', 1)
             for f in flines if not (f.startswith('#') or f.strip() == '')]
    # NOTE: 'order' records the names as given in the file, before any
    # rootname expansion below (preserves historical behavior).
    self['order'] = [f[0] for f in files]

    for f in files:
        # Check to see if filename provided is a full filename that corresponds
        # to a file on the path. If not, try to convert given rootname into
        # a valid filename based on available files. This may or may not
        # define the correct filename, which is why it prints out what it is
        # doing, so that the user can verify and edit the shiftfile if needed.
        #NOTE:
        # Supporting the specification of only rootnames in the shiftfile with this
        # filename expansion is NOT to be documented, but provided solely as
        # an undocumented, dangerous and not fully supported helper function for
        # some backwards compatibility.
        if not os.path.exists(f[0]):
            f[0] = fu.buildRootname(f[0])
            print('Defining filename in shiftfile as: ', f[0])

        tokens = f[1].split()
        try:
            f[1] = [float(t) for t in tokens]
        except ValueError:
            # BUG FIX: the original referenced the comprehension variable
            # after the comprehension (a NameError in Python 3) and built
            # the message as a tuple instead of a string.
            raise ValueError('Cannot read in %r from shiftfile %r as a '
                             'float number' % (f[1], filename))

        msg = "At least 2 and at most 4 shift values should be provided in a shiftfile"
        if len(f[1]) < 2 or len(f[1]) > 4:
            raise ValueError(msg)
        # Pad missing rotation and/or scale with identity values.
        if len(f[1]) == 2:
            f[1].extend([0.0, 1.0])
        elif len(f[1]) == 3:
            f[1].append(1.0)

    self.update(dict(files))
|
Writes a shift file object to a file on disk using the convention for shift file format.
def writeShiftFile(self, filename="shifts.txt"):
    """
    Write this shift-file object to disk using the standard shift file
    format: a 4-line '# key: value' header followed by one
    '<name> <shift> <shift> ...' row per member in self['order'].

    Parameters
    ----------
    filename : str, optional
        Output file name (default 'shifts.txt').
    """
    lines = ['# frame: ', self['frame'], '\n',
             '# refimage: ', self['refimage'], '\n',
             '# form: ', self['form'], '\n',
             '# units: ', self['units'], '\n']
    for name in self['order']:
        # Each shift value is followed by a single space (including the
        # last one), matching the historical file layout exactly.
        shifts = ''.join(str(s) + ' ' for s in self[name])
        lines.append(str(name) + ' ' + shifts + '\n')
    # Use a context manager so the file is closed even on write errors.
    with open(filename, 'w') as fshifts:
        fshifts.writelines(lines)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.