text
stringlengths 81
112k
|
|---|
Weave aspects into the target object.
Args:
target (string, class, instance, function or builtin):
The object to weave.
aspects (:py:obj:`aspectlib.Aspect`, function decorator or list of):
The aspects to apply to the object.
subclasses (bool):
If ``True``, subclasses of target are weaved. *Only available for classes*
aliases (bool):
If ``True``, aliases of target are replaced.
lazy (bool):
If ``True`` only target's ``__init__`` method is patched, the rest of the methods are patched after
``__init__`` is called. *Only available for classes*.
methods (list or regex or string):
Methods from target to patch. *Only available for classes*
Returns:
aspectlib.Rollback: An object that can rollback the patches.
Raises:
TypeError: If target is a unacceptable object, or the specified options are not available for that type of
object.
.. versionchanged:: 0.4.0
Replaced `only_methods`, `skip_methods`, `skip_magicmethods` options with `methods`.
Renamed `on_init` option to `lazy`.
Added `aliases` option.
Replaced `skip_subclasses` option with `subclasses`.
def weave(target, aspects, **options):
    """
    Weave the given *aspects* into *target*, patching it in place.

    Args:
        target (string, class, instance, function or builtin):
            The object to weave.
        aspects (:py:obj:`aspectlib.Aspect`, function decorator or list of):
            The aspects to apply to the object.
        subclasses (bool):
            If ``True``, subclasses of target are weaved. *Only available for classes*
        aliases (bool):
            If ``True``, aliases of target are replaced.
        lazy (bool):
            If ``True`` only target's ``__init__`` method is patched, the rest of the methods are patched after
            ``__init__`` is called. *Only available for classes*.
        methods (list or regex or string):
            Methods from target to patch. *Only available for classes*

    Returns:
        aspectlib.Rollback: An object that can rollback the patches.

    Raises:
        TypeError: If target is a unacceptable object, or the specified options are not available for that type of
            object.

    .. versionchanged:: 0.4.0

        Replaced `only_methods`, `skip_methods`, `skip_magicmethods` options with `methods`.
        Renamed `on_init` option to `lazy`.
        Added `aliases` option.
        Replaced `skip_subclasses` option with `subclasses`.
    """
    # Validate the aspects argument up front: it must be a callable or an
    # iterable of callables.
    if not callable(aspects):
        if not hasattr(aspects, '__iter__'):
            raise ExpectedAdvice('%s must be an `Aspect` instance, a callable or an iterable of.' % aspects)
        for obj in aspects:
            if not callable(obj):
                raise ExpectedAdvice('%s must be an `Aspect` instance or a callable.' % obj)
    assert target, "Can't weave falsy value %r." % target
    logdebug("weave (target=%s, aspects=%s, **options=%s)", target, aspects, options)
    # The bag remembers objects already weaved, so re-weaving is a no-op.
    bag = options.setdefault('bag', ObjectBag())
    if isinstance(target, (list, tuple)):
        # Weave every item; collect the individual rollbacks into one.
        return Rollback([
            weave(item, aspects, **options) for item in target
        ])
    elif isinstance(target, basestring):
        # Dotted-name target: resolve the string to an actual object first.
        parts = target.split('.')
        for part in parts:
            _check_name(part)
        if len(parts) == 1:
            return weave_module(_import_module(part), aspects, **options)
        # Find the longest importable prefix; the remainder is an attribute
        # path inside that module.
        for pos in reversed(range(1, len(parts))):
            owner, name = '.'.join(parts[:pos]), '.'.join(parts[pos:])
            try:
                owner = _import_module(owner)
            except ImportError:
                continue
            else:
                break
        else:
            raise ImportError("Could not import %r. Last try was for %s" % (target, owner))
        if '.' in name:
            # Walk the remaining attribute path down to the direct owner.
            path, name = name.rsplit('.', 1)
            path = deque(path.split('.'))
            while path:
                owner = getattr(owner, path.popleft())
        logdebug("@ patching %s from %s ...", name, owner)
        obj = getattr(owner, name)
        if isinstance(obj, (type, ClassType)):
            logdebug(" .. as a class %r.", obj)
            return weave_class(
                obj, aspects,
                owner=owner, name=name, **options
            )
        elif callable(obj):  # or isinstance(obj, FunctionType) ??
            logdebug(" .. as a callable %r.", obj)
            if bag.has(obj):
                return Nothing
            return patch_module_function(owner, obj, aspects, force_name=name, **options)
        else:
            # Not a class or callable: recurse on the resolved object.
            return weave(obj, aspects, **options)
    name = getattr(target, '__name__', None)
    if name and getattr(__builtin__, name, None) is target:
        # A builtin function: patch it on the builtins module.
        if bag.has(target):
            return Nothing
        return patch_module_function(__builtin__, target, aspects, **options)
    elif PY3 and ismethod(target):
        if bag.has(target):
            return Nothing
        # Bound method: shadow the class attribute on this one instance only.
        inst = target.__self__
        name = target.__name__
        logdebug("@ patching %r (%s) as instance method.", target, name)
        func = target.__func__
        setattr(inst, name, _checked_apply(aspects, func).__get__(inst, type(inst)))
        return Rollback(lambda: delattr(inst, name))
    elif PY3 and isfunction(target):
        if bag.has(target):
            return Nothing
        # Use __qualname__ to find the owner (module or enclosing class).
        owner = _import_module(target.__module__)
        path = deque(target.__qualname__.split('.')[:-1])
        while path:
            owner = getattr(owner, path.popleft())
        name = target.__name__
        logdebug("@ patching %r (%s) as a property.", target, name)
        func = owner.__dict__[name]
        return patch_module(owner, name, _checked_apply(aspects, func), func, **options)
    elif PY2 and isfunction(target):
        if bag.has(target):
            return Nothing
        return patch_module_function(_import_module(target.__module__), target, aspects, **options)
    elif PY2 and ismethod(target):
        if target.im_self:
            # Python 2 bound method: patch only this instance.
            if bag.has(target):
                return Nothing
            inst = target.im_self
            name = target.__name__
            logdebug("@ patching %r (%s) as instance method.", target, name)
            func = target.im_func
            setattr(inst, name, _checked_apply(aspects, func).__get__(inst, type(inst)))
            return Rollback(lambda: delattr(inst, name))
        else:
            # Python 2 unbound method: weave just that one method on its class.
            klass = target.im_class
            name = target.__name__
            return weave(klass, aspects, methods='%s$' % name, **options)
    elif isclass(target):
        return weave_class(target, aspects, **options)
    elif ismodule(target):
        return weave_module(target, aspects, **options)
    elif type(target).__module__ not in ('builtins', '__builtin__') or InstanceType and isinstance(target, InstanceType):
        # Anything that is not a builtin-typed object (or is an old-style
        # instance on PY2) is treated as a weavable instance.
        return weave_instance(target, aspects, **options)
    else:
        raise UnsupportedType("Can't weave object %s of type %s" % (target, type(target)))
|
Low-level weaver for instances.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
def weave_instance(instance, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
    """
    Low-level weaver for instances.

    .. warning:: You should not use this directly.

    :returns: An :obj:`aspectlib.Rollback` object.
    """
    if bag.has(instance):
        return Nothing
    rollback = Rollback()
    matches = make_method_matcher(methods)
    logdebug("weave_instance (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
             instance, aspect, methods, lazy, options)

    def rebind(func):
        # Bind the wrapped function back onto this specific instance.
        return func.__get__(instance, type(instance))

    # Append the re-binding step to the aspect chain.
    if isinstance(aspect, (list, tuple)):
        aspect_chain = aspect + [rebind]
    else:
        aspect_chain = [aspect, rebind]
    for attr_name in dir(instance):
        candidate = getattr(instance, attr_name)
        if not matches(attr_name) or not ismethod(candidate):
            continue
        # Unwrap to the plain function (PY3 __func__, PY2 im_func).
        raw = candidate.__func__ if hasattr(candidate, '__func__') else candidate.im_func
        rollback.merge(
            patch_module(instance, attr_name, _checked_apply(aspect_chain, raw, module=None), **options)
        )
    return rollback
|
Low-level weaver for "whole module weaving".
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
def weave_module(module, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
    """
    Low-level weaver for "whole module weaving".

    .. warning:: You should not use this directly.

    :returns: An :obj:`aspectlib.Rollback` object.
    """
    if bag.has(module):
        return Nothing
    rollback = Rollback()
    matches = make_method_matcher(methods)
    logdebug("weave_module (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
             module, aspect, methods, lazy, options)
    for attr_name in dir(module):
        value = getattr(module, attr_name)
        if not matches(attr_name):
            continue
        if isroutine(value):
            rollback.merge(patch_module_function(module, value, aspect, force_name=attr_name, **options))
        elif isclass(value):
            rollback.merge(
                weave_class(value, aspect, owner=module, name=attr_name, methods=methods, lazy=lazy, bag=bag,
                            **options),
                # it's not consistent with the other ways of weaving a class (it's never weaved as a routine).
                # therefore it's disabled until it's considered useful.
                # #patch_module_function(module, getattr(module, attr), aspect, force_name=attr, **options),
            )
    return rollback
|
Low-level weaver for classes.
.. warning:: You should not use this directly.
def weave_class(klass, aspect, methods=NORMAL_METHODS, subclasses=True, lazy=False,
                owner=None, name=None, aliases=True, bases=True, bag=BrokenBag):
    """
    Low-level weaver for classes.

    .. warning:: You should not use this directly.
    """
    assert isclass(klass), "Can't weave %r. Must be a class." % klass
    if bag.has(klass):
        return Nothing
    entanglement = Rollback()
    method_matches = make_method_matcher(methods)
    logdebug("weave_class (klass=%r, methods=%s, subclasses=%s, lazy=%s, owner=%s, name=%s, aliases=%s, bases=%s)",
             klass, methods, subclasses, lazy, owner, name, aliases, bases)
    if subclasses and hasattr(klass, '__subclasses__'):
        # Recursively weave existing subclasses, skipping classes this weaver
        # itself generated (they subclass Fabric).
        sub_targets = klass.__subclasses__()
        if sub_targets:
            logdebug("~ weaving subclasses: %s", sub_targets)
        for sub_class in sub_targets:
            if not issubclass(sub_class, Fabric):
                entanglement.merge(weave_class(sub_class, aspect,
                                               methods=methods, subclasses=subclasses, lazy=lazy, bag=bag))
    if lazy:
        # Lazy mode: only __init__ is wrapped now; the rest of the matching
        # methods are wrapped on the instance after __init__ runs.
        def __init__(self, *args, **kwargs):
            super(SubClass, self).__init__(*args, **kwargs)
            for attr in dir(self):
                func = getattr(self, attr, None)
                if method_matches(attr) and attr not in wrappers and isroutine(func):
                    setattr(self, attr, _checked_apply(aspect, force_bind(func)).__get__(self, SubClass))

        wrappers = {
            '__init__': _checked_apply(aspect, __init__) if method_matches('__init__') else __init__
        }
        # Method descriptors (static/class methods) must be wrapped eagerly,
        # on the class itself.
        for attr, func in klass.__dict__.items():
            if method_matches(attr):
                if ismethoddescriptor(func):
                    wrappers[attr] = _rewrap_method(func, klass, aspect)
        logdebug(" * creating subclass with attributes %r", wrappers)
        name = name or klass.__name__
        # NOTE: SubClass is referenced by the closures above; it must carry
        # this exact name so patch_module can swap it in for the original.
        SubClass = type(name, (klass, Fabric), wrappers)
        SubClass.__module__ = klass.__module__
        module = owner or _import_module(klass.__module__)
        entanglement.merge(patch_module(module, name, SubClass, original=klass, aliases=aliases))
    else:
        # Eager mode: wrap matching routines directly on the class, keeping
        # the originals so the rollback can restore them.
        original = {}
        for attr, func in klass.__dict__.items():
            if method_matches(attr):
                if isroutine(func):
                    logdebug("@ patching attribute %r (original: %r).", attr, func)
                    setattr(klass, attr, _rewrap_method(func, klass, aspect))
                else:
                    continue
                original[attr] = func
        # deque(..., maxlen=0) just drains the generator of setattr calls.
        entanglement.merge(lambda: deque((
            setattr(klass, attr, func) for attr, func in original.items()
        ), maxlen=0))
        if bases:
            # Also wrap inherited methods (defined on superclasses) by
            # shadowing them on this class; rollback deletes the shadows.
            super_original = set()
            for sklass in _find_super_classes(klass):
                if sklass is not object:
                    for attr, func in sklass.__dict__.items():
                        if method_matches(attr) and attr not in original and attr not in super_original:
                            if isroutine(func):
                                logdebug("@ patching attribute %r (from superclass: %s, original: %r).",
                                         attr, sklass.__name__, func)
                                setattr(klass, attr, _rewrap_method(func, sklass, aspect))
                            else:
                                continue
                            super_original.add(attr)
            entanglement.merge(lambda: deque((
                delattr(klass, attr) for attr in super_original
            ), maxlen=0))
    return entanglement
|
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object beeing patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
def patch_module(module, name, replacement, original=UNSPECIFIED, aliases=True, location=None, **_bogus_options):
    """
    Low-level attribute patcher.

    :param module module: Object to patch.
    :param str name: Attribute to patch
    :param replacement: The replacement value.
    :param original: The original value (in case the object being patched uses descriptors or is plain weird).
    :param bool aliases: If ``True`` patch all the attributes that have the same original value.
    :param location: Module name recorded on the replacement; defaults to the patched object's module.
    :returns: An :obj:`aspectlib.Rollback` object.
    """
    rollback = Rollback()
    seen = False
    original = getattr(module, name) if original is UNSPECIFIED else original
    # BUGFIX: ``location`` used to be unconditionally overwritten here, making
    # the parameter dead; only compute the default when the caller gave none.
    if location is None:
        location = module.__name__ if hasattr(module, '__name__') else type(module).__module__
    target = module.__name__ if hasattr(module, '__name__') else type(module).__name__
    try:
        replacement.__module__ = location
    except (TypeError, AttributeError):
        # Some objects (e.g. builtins) don't allow setting __module__.
        pass
    for alias in dir(module):
        logdebug("alias:%s (%s)", alias, name)
        if hasattr(module, alias):
            obj = getattr(module, alias)
            logdebug("- %s:%s (%s)", obj, original, obj is original)
            if obj is original:
                if aliases or alias == name:
                    logdebug("= saving %s on %s.%s ...", replacement, target, alias)
                    setattr(module, alias, replacement)
                    # Bind ``alias`` as a default so each rollback restores its own name.
                    rollback.merge(lambda alias=alias: setattr(module, alias, original))
                if alias == name:
                    seen = True
            elif alias == name:
                if ismethod(obj):
                    logdebug("= saving %s on %s.%s ...", replacement, target, alias)
                    setattr(module, alias, replacement)
                    rollback.merge(lambda alias=alias: setattr(module, alias, original))
                    seen = True
                else:
                    raise AssertionError("%s.%s = %s is not %s." % (module, alias, obj, original))
    if not seen:
        # Attribute wasn't found on the module: set it anyway, but warn.
        warnings.warn('Setting %s.%s to %s. There was no previous definition, probably patching the wrong module.' % (
            target, name, replacement
        ))
        logdebug("= saving %s on %s.%s ...", replacement, target, name)
        setattr(module, name, replacement)
        rollback.merge(lambda: setattr(module, name, original))
    return rollback
|
Low-level patcher for one function from a specified module.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
def patch_module_function(module, target, aspect, force_name=None, bag=BrokenBag, **options):
    """
    Low-level patcher for one function from a specified module.

    .. warning:: You should not use this directly.

    :returns: An :obj:`aspectlib.Rollback` object.
    """
    # BUGFIX: the debug format string was missing its closing parenthesis.
    logdebug("patch_module_function (module=%s, target=%s, aspect=%s, force_name=%s, **options=%s)",
             module, target, aspect, force_name, options)
    name = force_name or target.__name__
    return patch_module(module, name, _checked_apply(aspect, target, module=module), original=target, **options)
|
Strip leading DLE and trailing DLE/ETX from packet.
:param packet: TSIP packet with leading DLE and trailing DLE/ETX.
:type packet: Binary string.
:return: TSIP packet with leading DLE and trailing DLE/ETX removed.
:raise: ``ValueError`` if `packet` does not start with DLE and end in DLE/ETX.
def unframe(packet):
    """
    Strip leading DLE and trailing DLE/ETX from packet.

    :param packet: TSIP packet with leading DLE and trailing DLE/ETX.
    :type packet: Binary string.
    :return: TSIP packet with leading DLE and trailing DLE/ETX removed.
    :raise: ``ValueError`` if `packet` does not start with DLE and end in DLE/ETX.
    """
    if not is_framed(packet):
        raise ValueError('packet does not contain leading DLE and trailing DLE/ETX')
    # BUGFIX: lstrip/rstrip remove *all* matching leading/trailing bytes, so a
    # (stuffed) payload that itself begins with DLE or ends with an ETX-valued
    # byte was corrupted. Remove exactly one DLE at the front and exactly one
    # DLE+ETX at the back instead.
    return packet[len(CHR_DLE):-(len(CHR_DLE) + len(CHR_ETX))]
|
Add byte stuffing to TSIP packet.
:param packet: TSIP packet with byte stuffing. The packet must already
have been stripped or `ValueError` will be raised.
:type packet: Binary string.
:return: Packet with byte stuffing.
def stuff(packet):
    """
    Add byte stuffing to TSIP packet.

    :param packet: un-framed TSIP packet. The packet must already
        have been stripped or `ValueError` will be raised.
    :type packet: Binary string.
    :return: Packet with byte stuffing.
    """
    # Stuffing may only be applied to a packet without framing bytes.
    if is_framed(packet):
        raise ValueError('packet contains leading DLE and trailing DLE/ETX')
    # Escape every DLE in the payload by doubling it.
    return packet.replace(CHR_DLE, CHR_DLE + CHR_DLE)
|
Remove byte stuffing from a TSIP packet.
:param packet: TSIP packet with byte stuffing. The packet must already
have been stripped or `ValueError` will be raised.
:type packet: Binary string.
:return: Packet without byte stuffing.
def unstuff(packet):
    """
    Remove byte stuffing from a TSIP packet.

    :param packet: TSIP packet with byte stuffing. The packet must already
        have been stripped or `ValueError` will be raised.
    :type packet: Binary string.
    :return: Packet without byte stuffing.
    """
    # Un-stuffing is only valid on a packet without framing bytes.
    if is_framed(packet):
        raise ValueError('packet contains leading DLE and trailing DLE/ETX')
    # Collapse each doubled DLE back to a single DLE.
    return packet.replace(CHR_DLE + CHR_DLE, CHR_DLE)
|
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
Modified from 'pydrizzle.fileutil' to allow this
module to be independent of PyDrizzle/MultiDrizzle.
def parseFilename(filename):
    """
    Parse out filename from any specified extensions.

    Returns rootname and string version of extension name.

    Modified from 'pydrizzle.fileutil' to allow this
    module to be independent of PyDrizzle/MultiDrizzle.

    :param filename: file name, optionally followed by an extension
        specification in square brackets, e.g. ``'f.fits[sci,1]'``,
        ``'f.fits[1]'``, ``'f.fits[sci]'`` or GEIS group syntax ``'f.imh[2/4]'``.
    :return: tuple ``(rootname, extn)`` where ``extn`` is
        ``[extname, extver]``, ``[extnum]``, ``[extname]`` or ``None`` when
        no extension was specified.
    """
    _indx = filename.find('[')
    if _indx > 0:
        # An extension was provided, so parse it out...
        _fname = filename[:_indx]
        extn = filename[_indx + 1:-1]
        # BUGFIX: the original tested ``repr(extn).find(',') > 1``, an obscure
        # (and off-by-the-quote) membership check that also misses a separator
        # at position 0; test membership directly instead.
        if ',' in extn:
            # Two values given, e.g. 'sci,1' or 'dq,1' -> [EXTNAME, EXTVER]
            _extns = extn.split(',')
            _extn = [_extns[0], int(_extns[1])]
        elif '/' in extn:
            # GEIS group syntax, e.g. '2/4' -> group number only
            _extn = [int(extn.split('/')[0])]
        elif extn.isdigit():
            # Extension number given as a string, e.g. '1'
            _extn = [int(extn)]
        else:
            # Only EXTNAME specified, e.g. 'sci'
            _extn = [extn]
    else:
        # No extension specification present.
        _fname = filename
        _extn = None
    return _fname, _extn
|
Returns the shape of the data array associated with this file.
def _shape(self):
""" Returns the shape of the data array associated with this file."""
hdu = self.open()
_shape = hdu.shape
if not self.inmemory:
self.close()
del hdu
return _shape
|
Returns the data array associated with this file/extenstion.
def _data(self):
""" Returns the data array associated with this file/extenstion."""
hdu = self.open()
_data = hdu.data.copy()
if not self.inmemory:
self.close()
del hdu
return _data
|
Returns the shape of the data array associated with this file.
def type(self):
    """Return the dtype name of the data array associated with this file."""
    hdu = self.open()
    dtype_name = hdu.data.dtype.name
    if not self.inmemory:
        # File-backed: release the handle once the dtype has been read.
        self.close()
        del hdu
    return dtype_name
|
Opens the file for subsequent access.
def open(self):
    """Open the FITS file (lazily, read-only) and return the selected HDU."""
    # Reuse an existing handle across calls; open the file only once.
    if self.handle is None:
        self.handle = fits.open(self.fname, mode='readonly')
    if not self.extn:
        hdu = self.handle[0]
    elif len(self.extn) == 1:
        hdu = self.handle[self.extn[0]]
    else:
        hdu = self.handle[self.extn[0], self.extn[1]]
    # Record when the selected HDU is tile-compressed.
    if isinstance(hdu, fits.hdu.compressed.CompImageHDU):
        self.compress = True
    return hdu
|
Tests if the given times overlap with this measurement.
:param targetStartTime: the target start time.
:param duration: the duration
:return: true if the given times overlap with this measurement.
def overlapsWith(self, targetStartTime, duration):
    """
    Tests if the given times overlap with this measurement.

    :param targetStartTime: the target start time.
    :param duration: the duration
    :return: true if the given times overlap with this measurement.
    """
    targetEndTime = targetStartTime + datetime.timedelta(days=0, seconds=duration)
    # Intervals overlap when either one starts inside the other.
    targetStartsInsideUs = self.startTime <= targetStartTime <= self.endTime
    weStartInsideTarget = targetStartTime <= self.startTime <= targetEndTime
    return targetStartsInsideUs or weStartInsideTarget
|
Updates the current device status.
:param deviceName: the device name.
:param state: the state.
:param reason: the reason for the change.
:return:
def updateDeviceStatus(self, deviceName, state, reason=None):
    """
    Updates the current device status.

    :param deviceName: the device name.
    :param state: the state.
    :param reason: the reason for the change.
    :return:
    """
    logger.info('Updating recording device state for ' + deviceName + ' to ' + state.name +
                ('' if reason is None else '[reason: ' + reason + ']'))
    previous = self.recordingDevices.get(deviceName)
    # Carry the sample count forward only while the device was recording.
    if previous is not None and previous['state'] == MeasurementStatus.RECORDING.name:
        count = previous['count']
    else:
        count = 0
    self.recordingDevices[deviceName] = {
        'state': state.name,
        'reason': reason,
        'time': datetime.datetime.utcnow().strftime(DATETIME_FORMAT),
        'count': count
    }
|
For a device that is recording, updates the last timestamp so we know when we last received data.
:param deviceId: the device id.
:param dataCount: the no of items of data recorded in this batch.
:return:
def stillRecording(self, deviceId, dataCount):
    """
    For a device that is recording, updates the last timestamp so we know when we last received data.

    :param deviceId: the device id.
    :param dataCount: the no of items of data recorded in this batch.
    :return:
    """
    # BUGFIX: use .get() so an unknown device is a no-op; plain indexing raised
    # KeyError and made the ``is not None`` guard below dead code.
    status = self.recordingDevices.get(deviceId)
    if status is not None:
        if status['state'] == MeasurementStatus.RECORDING.name:
            status['last'] = datetime.datetime.utcnow().strftime(DATETIME_FORMAT)
            status['count'] = status['count'] + dataCount
|
loads the recording into memory and returns it as a Signal
:return:
def inflate(self):
    """
    loads the recording into memory and returns it as a Signal

    :return: True if the data was (or already is) loaded, False otherwise.
    """
    if not self.measurementParameters['accelerometerEnabled']:
        # TODO error handling
        return False
    # Load lazily: only hit the disk when nothing is cached yet.
    if len(self.data) == 0:
        logger.info('Loading measurement data for ' + self.name)
        self.data = {name: self._loadXYZ(name) for name, value in self.recordingDevices.items()}
    return True
|
Checks the state of each measurement and verifies their state, if an active measurement is now complete then
passes them to the completed measurement set, if failed then to the failed set, if failed and old then evicts.
:return:
def _sweep(self):
    """
    Checks the state of each measurement and verifies their state, if an active measurement is now complete then
    passes them to the completed measurement set, if failed then to the failed set, if failed and old then evicts.

    :return:
    """
    while self.running:
        # Iterate over a snapshot: _moveToComplete/_moveToFailed mutate the list.
        for am in list(self.activeMeasurements):
            now = datetime.datetime.utcnow()
            # devices were allocated and have completed == complete
            recordingDeviceCount = len(am.recordingDevices)
            if recordingDeviceCount > 0:
                if all(entry['state'] == RecordStatus.COMPLETE.name for entry in am.recordingDevices.values()):
                    # BUGFIX: log message previously read "completedmeasurement".
                    logger.info("Detected completed measurement " + am.id)
                    self._moveToComplete(am)
            # we have reached the end time and we have either all failed devices or no devices == kill
            if now > (am.endTime + datetime.timedelta(days=0, seconds=1)):
                allFailed = all(entry['state'] == RecordStatus.FAILED.name
                                for entry in am.recordingDevices.values())
                if (recordingDeviceCount > 0 and allFailed) or recordingDeviceCount == 0:
                    logger.warning("Detected failed measurement " + am.id + " with " + str(recordingDeviceCount)
                                   + " devices, allFailed: " + str(allFailed))
                    self._moveToFailed(am)
            # we are well past the end time and we have failed devices or an ongoing recording == kill or deathbed
            if now > (am.endTime + datetime.timedelta(days=0, seconds=self.maxTimeTilDeathbedSeconds)):
                if any(entry['state'] == RecordStatus.FAILED.name for entry in am.recordingDevices.values()):
                    logger.warning("Detected failed and incomplete measurement " + am.id + ", assumed dead")
                    self._moveToFailed(am)
                elif all(entry['state'] == RecordStatus.RECORDING.name for entry in am.recordingDevices.values()):
                    self._handleDeathbed(am)
        time.sleep(0.1)
    logger.warning("MeasurementCaretaker is now shutdown")
|
Schedules a new measurement with the given name.
:param name:
:param duration:
:param startTime:
:param description:
:return: a tuple
boolean: measurement was scheduled if true
message: description, generally only used as an error code
def schedule(self, name, duration, startTime, description=None):
    """
    Schedules a new measurement with the given name.

    :param name:
    :param duration:
    :param startTime:
    :param description:
    :return: a tuple
        boolean: measurement was scheduled if true
        message: description, generally only used as an error code
    """
    # Reject anything that overlaps an existing scheduled measurement.
    if self._clashes(startTime, duration):
        return False, MEASUREMENT_TIMES_CLASH
    am = ActiveMeasurement(name, startTime, duration, self.targetStateProvider.state, description=description)
    logger.info("Scheduling measurement " + am.id + " for " + str(duration) + "s")
    self.activeMeasurements.append(am)
    statusByDevice = self.deviceController.scheduleMeasurement(am.id, am.duration, am.startTime)
    anyFail = False
    for device, httpStatus in statusByDevice.items():
        if httpStatus == 200:
            deviceStatus = RecordStatus.SCHEDULED
        else:
            deviceStatus = RecordStatus.FAILED
            anyFail = True
        am.updateDeviceStatus(device.deviceId, deviceStatus)
    # Any single device failure fails the whole measurement.
    if anyFail:
        am.status = MeasurementStatus.FAILED
    elif am.status is MeasurementStatus.NEW:
        am.status = MeasurementStatus.SCHEDULED
    return True, None
|
verifies that this measurement does not clash with an already scheduled measurement.
:param startTime: the start time.
:param duration: the duration.
:return: truthy if the proposed measurement clashes with an existing one.
def _clashes(self, startTime, duration):
"""
verifies that this measurement does not clash with an already scheduled measurement.
:param startTime: the start time.
:param duration: the duration.
:return: true if the measurement is allowed.
"""
return [m for m in self.activeMeasurements if m.overlapsWith(startTime, duration)]
|
Starts the measurement for the device.
:param deviceId: the device that is starting.
:param measurementId: the measurement that is started.
:return: true if it started (i.e. device and measurement exists).
def startMeasurement(self, measurementId, deviceId):
    """
    Starts the measurement for the device.

    :param deviceId: the device that is starting.
    :param measurementId: the measurement that is started.
    :return: true if it started (i.e. device and measurement exists).
    """
    am, handler = self.getDataHandler(measurementId, deviceId)
    if am is None:
        return False
    am.status = MeasurementStatus.RECORDING
    am.updateDeviceStatus(deviceId, RecordStatus.RECORDING)
    handler.start(am.idAsPath)
    return True
|
finds the handler.
:param measurementId: the measurement
:param deviceId: the device.
:return: active measurement and handler
def getDataHandler(self, measurementId, deviceId):
    """
    finds the handler.

    :param measurementId: the measurement
    :param deviceId: the device.
    :return: active measurement and handler
    """
    # Guard clauses instead of nested if/else: bail out as soon as either
    # the measurement or the device is unknown.
    measurement = next((m for m in self.activeMeasurements if m.id == measurementId), None)
    if measurement is None:
        return None, None
    device = self.deviceController.getDevice(deviceId)
    if device is None:
        return None, None
    return measurement, device.dataHandler
|
Passes the data to the handler.
:param deviceId: the device the data comes from.
:param measurementId: the measurement id.
:param data: the data.
:return: true if the data was handled.
def recordData(self, measurementId, deviceId, data):
    """
    Passes the data to the handler.

    :param deviceId: the device the data comes from.
    :param measurementId: the measurement id.
    :param data: the data.
    :return: true if the data was handled.
    """
    am, handler = self.getDataHandler(measurementId, deviceId)
    if handler is None:
        logger.error('Received data for unknown handler ' + deviceId + '/' + measurementId)
        return False
    # Refresh the liveness info before forwarding the batch.
    am.stillRecording(deviceId, len(data))
    handler.handle(data)
    return True
|
Completes the measurement session.
:param deviceId: the device id.
:param measurementId: the measurement id.
:return: true if it was completed.
def completeMeasurement(self, measurementId, deviceId):
    """
    Completes the measurement session.

    :param deviceId: the device id.
    :param measurementId: the measurement id.
    :return: true if it was completed.
    """
    am, handler = self.getDataHandler(measurementId, deviceId)
    if handler is None:
        return False
    handler.stop(measurementId)
    am.updateDeviceStatus(deviceId, RecordStatus.COMPLETE)
    return True
|
Fails the measurement session.
:param deviceName: the device name.
:param measurementId: the measurement name.
:param failureReason: why it failed.
:return: true if it was completed.
def failMeasurement(self, measurementId, deviceName, failureReason=None):
    """
    Fails the measurement session.

    :param deviceName: the device name.
    :param measurementId: the measurement name.
    :param failureReason: why it failed.
    :return: true if it was completed.
    """
    am, handler = self.getDataHandler(measurementId, deviceName)
    if handler is None:
        return False
    am.updateDeviceStatus(deviceName, RecordStatus.FAILED, reason=failureReason)
    handler.stop(measurementId)
    return True
|
Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted
def _deleteCompletedMeasurement(self, measurementId):
"""
Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted
"""
message, count, deleted = self.deleteFrom(measurementId, self.completeMeasurements)
if count is 0:
message, count, deleted = self.deleteFrom(measurementId, self.failedMeasurements)
return message, count, deleted
|
Reloads the completed measurements from the backing store.
def reloadCompletedMeasurements(self):
    """
    Reloads the completed measurements from the backing store.
    """
    from pathlib import Path
    # Measurements live three directory levels deep under the data dir.
    candidates = [self.load(p.resolve()) for p in Path(self.dataDir).glob('*/*/*') if p.is_dir()]
    logger.info('Reloaded ' + str(len(candidates)) + ' completed measurements')
    self.completeMeasurements = [m for m in candidates if m is not None and m.status == MeasurementStatus.COMPLETE]
    self.failedMeasurements = [m for m in candidates if m is not None and m.status == MeasurementStatus.FAILED]
|
Gets all available measurements.
:param measurementStatus return only the measurements in the given state.
:return:
def getMeasurements(self, measurementStatus=None):
    """
    Gets all available measurements.

    :param measurementStatus: return only the measurements in the given state.
    :return: the matching measurements.
    """
    if measurementStatus is None:
        # No filter: everything we know about, in every state.
        return self.activeMeasurements + self.completeMeasurements + self.failedMeasurements
    if measurementStatus == MeasurementStatus.COMPLETE:
        return self.completeMeasurements
    if measurementStatus == MeasurementStatus.FAILED:
        return self.failedMeasurements
    if measurementStatus == MeasurementStatus.DYING:
        return list(self.deathBed.keys())
    # Any other status filters the active set.
    return [m for m in self.activeMeasurements if m.status == measurementStatus]
|
Gets the measurement with the given id.
:param measurementId: the id.
:param measurementStatus: the status of the requested measurement.
:return: the matching measurement or none if it doesn't exist.
def getMeasurement(self, measurementId, measurementStatus=None):
    """
    Gets the measurement with the given id.

    :param measurementId: the id.
    :param measurementStatus: the status of the requested measurement.
    :return: the matching measurement or none if it doesn't exist.
    """
    for measurement in self.getMeasurements(measurementStatus):
        if measurement.id == measurementId:
            return measurement
    return None
|
Writes the measurement metadata to disk on completion.
:param activeMeasurement: the measurement that has completed.
:returns the persisted metadata.
def store(self, measurement):
    """
    Writes the measurement metadata to disk on completion.

    :param measurement: the measurement that has completed.
    :returns: the persisted metadata.
    """
    metaDir = self._getPathToMeasurementMetaDir(measurement.idAsPath)
    os.makedirs(metaDir, exist_ok=True)
    serialised = marshal(measurement, measurementFields)
    # Persist the marshalled form alongside the recorded data.
    with open(self._getPathToMeasurementMetaFile(measurement.idAsPath), 'w') as outfile:
        json.dump(serialised, outfile)
    return serialised
|
Loads a CompletedMeasurement from the path.
:param path: the path at which the data is found.
:return: the measurement
def load(self, path):
    """
    Loads a CompletedMeasurement from the path.

    :param path: the path at which the data is found.
    :return: the measurement, or None when no metadata exists there.
    """
    meta = self._loadMetaFromJson(path)
    if meta is None:
        return None
    return CompleteMeasurement(meta, self.dataDir)
|
Reads the json meta into memory.
:return: the meta.
def _loadMetaFromJson(self, path):
"""
Reads the json meta into memory.
:return: the meta.
"""
try:
with (path / 'metadata.json').open() as infile:
return json.load(infile)
except FileNotFoundError:
logger.error('Metadata does not exist at ' + str(path))
return None
|
Edits the specified measurement with the provided data.
:param measurementId: the measurement id.
:param data: the data to update.
:return: true if the measurement was edited
def editMeasurement(self, measurementId, data):
    """
    Edits the specified measurement with the provided data.
    :param measurementId: the measurement id.
    :param data: the data to update; may contain 'name', 'description',
                 'start', 'end' and 'devices' (a list of [old, new] name pairs).
    :return: true if the measurement was edited
    """
    oldMeasurement = self.getMeasurement(measurementId, measurementStatus=MeasurementStatus.COMPLETE)
    if oldMeasurement:
        import copy
        # work on a deep copy so the original is untouched until we commit
        newMeasurement = copy.deepcopy(oldMeasurement)
        deleteOld = False
        createdFilteredCopy = False
        newName = data.get('name', None)
        newDesc = data.get('description', None)
        newStart = float(data.get('start', 0))
        newEnd = float(data.get('end', oldMeasurement.duration))
        newDuration = newEnd - newStart
        newDevices = data.get('devices', None)
        if newName:
            logger.info('Updating name from ' + oldMeasurement.name + ' to ' + newName)
            newMeasurement.updateName(newName)
            createdFilteredCopy = True
            deleteOld = True
        if newDesc:
            logger.info('Updating description from ' + str(oldMeasurement.description) + ' to ' + str(newDesc))
            newMeasurement.description = newDesc
        # NOTE(review): exact float comparison — a tiny rounding difference in
        # start/end triggers a full data copy; confirm that is intended.
        if newDuration != oldMeasurement.duration:
            logger.info('Copying measurement to allow support new duration ' + str(newDuration))
            # avoid a name clash when trimming without renaming
            if oldMeasurement.name == newMeasurement.name:
                newMeasurement.updateName(newMeasurement.name + '-' + str(int(time.time())))
            newMeasurement.duration = newDuration
            createdFilteredCopy = True
        if createdFilteredCopy:
            # copy (and time-filter) each device's data.out into the new location
            logger.info('Copying measurement data from ' + oldMeasurement.idAsPath + ' to ' + newMeasurement.idAsPath)
            newMeasurementPath = self._getPathToMeasurementMetaDir(newMeasurement.idAsPath)
            dataSearchPattern = self._getPathToMeasurementMetaDir(oldMeasurement.idAsPath) + '/**/data.out'
            newDataCountsByDevice = [self._filterCopy(dataFile, newStart, newEnd, newMeasurementPath)
                                     for dataFile in glob.glob(dataSearchPattern)]
            for device, count in newDataCountsByDevice:
                newMeasurement.recordingDevices.get(device)['count'] = count
            self.store(newMeasurement)
        if newDevices:
            # apply device renames to both the metadata and the on-disk dirs
            for renames in newDevices:
                logger.info('Updating device name from ' + str(renames[0]) + ' to ' + str(renames[1]))
                deviceState = newMeasurement.recordingDevices.get(renames[0])
                newMeasurement.recordingDevices[renames[1]] = deviceState
                del newMeasurement.recordingDevices[renames[0]]
                os.rename(os.path.join(self._getPathToMeasurementMetaDir(newMeasurement.idAsPath), renames[0]),
                          os.path.join(self._getPathToMeasurementMetaDir(newMeasurement.idAsPath), renames[1]))
            # persist again so the renames reach disk
            self.store(newMeasurement)
        if deleteOld or createdFilteredCopy or newDevices:
            self.completeMeasurements.append(newMeasurement)
        if deleteOld:
            self.delete(oldMeasurement.id)
        return True
    else:
        return False
|
Copies the data file to a new file in the tmp dir, filtering it according to newStart and newEnd and adjusting
the times as appropriate so it starts from 0.
:param dataFile: the input file.
:param newStart: the new start time.
:param newEnd: the new end time.
:param newDataDir: the tmp dir to write to.
:return: the device name & no of rows in the data.
def _filterCopy(self, dataFile, newStart, newEnd, newDataDir):
"""
Copies the data file to a new file in the tmp dir, filtering it according to newStart and newEnd and adjusting
the times as appropriate so it starts from 0.
:param dataFile: the input file.
:param newStart: the new start time.
:param newEnd: the new end time.
:param newDataDir: the tmp dir to write to.
:return: the device name & no of rows in the data.
"""
import csv
pathToData = os.path.split(dataFile)
dataFileName = pathToData[1]
dataDeviceName = os.path.split(pathToData[0])[1]
os.makedirs(os.path.join(newDataDir, dataDeviceName), exist_ok=True)
outputFile = os.path.join(newDataDir, dataDeviceName, dataFileName)
dataCount = 0
rowNum = 0
with open(dataFile, mode='rt', newline='') as dataIn, open(outputFile, mode='wt', newline='') as dataOut:
writer = csv.writer(dataOut, delimiter=',')
for row in csv.reader(dataIn, delimiter=','):
if len(row) > 0:
time = float(row[0])
if newStart <= time <= newEnd:
newRow = row[:]
if newStart > 0:
newRow[0] = "{0:.3f}".format(time - newStart)
writer.writerow(newRow)
dataCount += 1
else:
logger.warning('Ignoring empty row ' + str(rowNum) + ' in ' + str(dataFile))
rowNum += 1
return dataDeviceName, dataCount
|
- Converts waiver fits science and data quality files to MEF format
- Converts GEIS science and data quality files to MEF format
- Checks for stis association tables and splits them into single imsets
- Removes files with EXPTIME=0 and the corresponding ivm files
- Removes files with NGOODPIX == 0 (to exclude saturated images)
- Removes files with missing PA_V3 keyword
The list of science files should match the list of ivm files at the end.
def checkFiles(filelist,ivmlist = None):
    """
    - Converts waiver fits science and data quality files to MEF format
    - Converts GEIS science and data quality files to MEF format
    - Checks for stis association tables and splits them into single imsets
    - Removes files with EXPTIME=0 and the corresponding ivm files
    - Removes files with NGOODPIX == 0 (to exclude saturated images)
    - Removes files with missing PA_V3 keyword
    The list of science files should match the list of ivm files at the end.
    """
    # When the caller supplies filenames the helpers below return open
    # HDULists; convert back to filenames before returning in that case.
    toclose = False
    if isinstance(filelist[0], str):
        toclose = True
    newfilelist, ivmlist = checkFITSFormat(filelist, ivmlist)
    # check for STIS association files. This must be done before
    # the other checks in order to handle correctly stis
    # assoc files
    newfilelist, ivmlist = checkStisFiles(newfilelist, ivmlist)
    if newfilelist == []:
        return [], []
    # drop files with zero or missing exposure time
    removed_expt_files = check_exptime(newfilelist)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_expt_files)
    if newfilelist == []:
        return [], []
    # drop fully-bad images (NGOODPIX == 0 on all chips)
    removed_ngood_files = checkNGOODPIX(newfilelist)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_ngood_files)
    if newfilelist == []:
        return [], []
    # drop files missing the PA_V3 keyword
    removed_pav3_files = checkPA_V3(newfilelist)
    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_pav3_files)
    # NOTE(review): this extra call with an empty removal list looks
    # redundant — confirm whether update_input has needed side effects.
    newfilelist, ivmlist = update_input(newfilelist, ivmlist,[])
    if newfilelist == []:
        return [], []
    if toclose:
        newfilelist = [hdul.filename() for hdul in newfilelist]
    return newfilelist, ivmlist
|
This code will check whether or not files are GEIS or WAIVER FITS and
convert them to MEF if found. It also keeps the IVMLIST consistent with
the input filelist, in the case that some inputs get dropped during
the check/conversion.
def checkFITSFormat(filelist, ivmlist=None):
    """
    Check whether input files are GEIS or WAIVER FITS and convert them to
    MEF where needed, keeping the IVM list consistent with the science list
    when inputs are dropped during the check/conversion.
    """
    if ivmlist is None:
        ivmlist = [None] * len(filelist)
    paired = list(zip(filelist, ivmlist))
    removed_files, translated_names, newivmlist = convert2fits(paired)
    newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files)
    if not newfilelist and not translated_names:
        return [], []
    if translated_names:
        # the converted MEF files (and their ivm partners) rejoin the list
        newfilelist.extend(translated_names)
        ivmlist.extend(newivmlist)
    return newfilelist, ivmlist
|
Removes files with EXPTIME==0 from filelist.
def check_exptime(filelist):
    """
    Removes files with EXPTIME==0 from filelist.

    :param filelist: list of filenames or already-open HDULists.
    :return: the list of rejected inputs (HDULists for filename inputs).
    """
    # NOTE(review): toclose is set but never used — HDULists opened here are
    # never closed. Also, for filename inputs the opened HDUList (not the
    # original string) is appended to removed_files; confirm update_input
    # can match those against the string entries in filelist.
    toclose = False
    removed_files = []
    for f in filelist:
        if isinstance(f, str):
            f = fits.open(f)
            toclose = True
        try:
            exptime = f[0].header['EXPTIME']
        except KeyError:
            # missing keyword is treated the same as a zero exposure
            removed_files.append(f)
            print("Warning: There are files without keyword EXPTIME")
            continue
        if exptime <= 0:
            removed_files.append(f)
            print("Warning: There are files with zero exposure time: keyword EXPTIME = 0.0")
    if removed_files != []:
        print("Warning: Removing the following files from input list")
        for f in removed_files:
            print('\t',f.filename() or "")
    return removed_files
|
Only for ACS, WFC3 and STIS, check NGOODPIX
If all pixels are 'bad' on all chips, exclude this image
from further processing.
Similar checks requiring comparing 'driz_sep_bits' against
WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be
done separately (and later).
def checkNGOODPIX(filelist):
    """
    Only for ACS, WFC3 and STIS, check NGOODPIX.
    If all pixels are 'bad' on all chips, exclude this image
    from further processing.
    Similar checks requiring comparing 'driz_sep_bits' against
    WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be
    done separately (and later).

    :param filelist: list of filenames or already-open HDULists.
    :return: the list of rejected inputs.
    """
    removed_files = []
    supported_instruments = ['ACS','STIS','WFC3']
    for inputfile in filelist:
        # Bug fix: toclose must reset per file — the original carried it
        # across iterations and could close a caller-supplied HDUList.
        toclose = False
        if isinstance(inputfile, str):
            if fileutil.getKeyword(inputfile,'instrume') in supported_instruments:
                inputfile = fits.open(inputfile)
                toclose = True
            else:
                # Bug fix: a filename with an unsupported instrument
                # previously fell through and iterated the characters
                # of the string below.
                continue
        elif inputfile[0].header['instrume'] not in supported_instruments:
            continue
        # sum NGOODPIX over all science extensions
        ngood = 0
        for extn in inputfile:
            if 'EXTNAME' in extn.header and extn.header['EXTNAME'] == 'SCI':
                ngood += extn.header['NGOODPIX']
        if (ngood == 0):
            removed_files.append(inputfile)
        if toclose:
            inputfile.close()
    if removed_files != []:
        print("Warning: Files without valid pixels detected: keyword NGOODPIX = 0.0")
        print("Warning: Removing the following files from input list")
        for f in removed_files:
            print('\t',f.filename() or "")
    return removed_files
|
Input: A stis multiextension file
Output: Number of stis science extensions in input
def stisObsCount(input):
    """
    Count the STIS science extensions in a multiextension file.

    Input: A stis multiextension file (filename or open HDUList)
    Output: Number of stis science extensions in input
    """
    opened_here = False
    if isinstance(input, str):
        input = fits.open(input)
        opened_here = True
    count = sum(1 for ext in input
                if 'extname' in ext.header
                and ext.header['extname'].upper() == 'SCI')
    if opened_here:
        input.close()
    return count
|
Split a STIS association file into multiple imset MEF files.
Split the corresponding spt file if present into single spt files.
If an spt file can't be split or is missing a Warning is printed.
Returns
-------
names: list
a list with the names of the new flt files.
def splitStis(stisfile, sci_count):
    """
    Split a STIS association file into multiple imset MEF files.

    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing a Warning is printed.

    Parameters
    ----------
    stisfile : str or HDUList
        The STIS association file (filename or an already-open HDUList).
    sci_count : int
        Number of SCI imsets to split out.

    Returns
    -------
    newfiles: list
        a list of HDUList handles for the new flt files, each opened in
        update mode (note: handles, not filenames).
    """
    newfiles = []
    toclose = False
    if isinstance(stisfile, str):
        f = fits.open(stisfile)
        toclose = True
    else:
        f = stisfile
    hdu0 = f[0].copy()
    # Bug fix: stisfile.filename() raised AttributeError when a plain
    # filename string was passed in; use the string directly in that case.
    stisfilename = stisfile if isinstance(stisfile, str) else stisfile.filename()
    for count in range(1,sci_count+1):
        fitsobj = fits.HDUList()
        fitsobj.append(hdu0)
        hdu = f[('sci',count)].copy()
        fitsobj.append(hdu)
        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f[('err', count)].data is None:
                raise ValueError
            # Verify dq array exists
            if f[('dq', count)].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f[('err',count)].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f[('dq',count)].copy()
            fitsobj.append(hdu)
            fitsobj[1].header['EXTVER'] = 1
            fitsobj[2].header['EXTVER'] = 1
            fitsobj[3].header['EXTVER'] = 1
        except ValueError:
            print('\nWarning:')
            print('Extension version %d of the input file %s does not' %(count, stisfile))
            print('contain all required image extensions. Each must contain')
            print('populates SCI, ERR and DQ arrays.')
            continue
        # Determine if the file you wish to create already exists on the disk.
        # If the file does exist, replace it.
        if (os.path.exists(newfilename)):
            os.remove(newfilename)
            print(" Replacing "+newfilename+"...")
        # Write out the new file
        fitsobj.writeto(newfilename)
        # Insure returned HDUList is associated with a file
        fitsobj.close()
        fitsobj = fits.open(newfilename, mode='update')
        newfiles.append(fitsobj) # Return HDUList, not filename
    # NOTE(review): this closes f even when the caller passed in an open
    # HDUList (toclose is False) — confirm callers expect that.
    f.close()
    sptfilename = fileutil.buildNewRootname(stisfilename, extn='_spt.fits')
    try:
        sptfile = fits.open(sptfilename)
    except IOError:
        print('SPT file not found %s \n' % sptfilename)
        return newfiles
    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1,sci_count+1):
                fitsobj = fits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if (os.path.exists(newfilename)):
                    os.remove(newfilename)
                    print(" Replacing "+newfilename+"...")
                # Write out the new file
                fitsobj.writeto(newfilename)
        # Bug fix: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            print("Warning: Unable to split spt file %s " % sptfilename)
        # Bug fix: the spt file is opened locally, so always close it
        # (previously it was only closed when the science input was a string).
        sptfile.close()
    return newfiles
|
Several kw which are usually in the primary header
are in the extension header for STIS. They are copied to
the primary header for convenience.
List if kw:
'DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'
def stisExt2PrimKw(stisfiles):
    """
    Copy selected keywords from the first extension header to the primary
    header of each STIS file, for convenience (STIS keeps several keywords
    that are usually primary-header keywords in the extension header).

    Keywords copied: 'DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'
    """
    for sfile in stisfiles:
        opened_here = False
        if isinstance(sfile, str):
            sfile = fits.open(sfile, mode='update')
            opened_here = True
        primary = sfile[0].header
        first_ext = sfile[1].header
        for kw in ('DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'):
            primary[kw] = first_ext[kw]
            primary.comments[kw] = "Copied from extension header"
        if opened_here:
            sfile.close()
|
Checks if a file is in WAIVER or GEIS format and converts it to MEF
def convert2fits(sci_ivm):
    """
    Checks if a file is in WAIVER or GEIS format and converts it to MEF.

    :param sci_ivm: iterable of (science filename, ivm filename) pairs.
    :return: (removed_files, translated_names, newivmlist)
    """
    removed_files = []
    translated_names = []
    newivmlist = []
    for sci_name, ivm_name in sci_ivm:
        # find out what the input is;
        # a science file missing from disk is simply dropped from the list
        try:
            imgfits, imgtype = fileutil.isFits(sci_name)
        except IOError:
            print("Warning: File %s could not be found" % sci_name)
            print("Warning: Removing file %s from input list" % sci_name)
            removed_files.append(sci_name)
            continue
        # WAIVER FITS input: convert to MEF, or drop it if conversion fails
        if imgfits and imgtype == 'waiver':
            converted = waiver2mef(sci_name, convert_dq=True)
            if converted is None:
                print("Removing file %s from input list - could not convert WAIVER format to MEF\n" % sci_name)
                removed_files.append(sci_name)
            else:
                removed_files.append(sci_name)
                translated_names.append(converted)
                newivmlist.append(ivm_name)
        # GEIS input: create a new MEF file (plus its data quality file,
        # if present) and substitute it for the original
        if not imgfits:
            converted = geis2mef(sci_name, convert_dq=True)
            if converted is None:
                print("Removing file %s from input list - could not convert GEIS format to MEF\n" % sci_name)
                removed_files.append(sci_name)
            else:
                removed_files.append(sci_name)
                translated_names.append(converted)
                newivmlist.append(ivm_name)
    return removed_files, translated_names, newivmlist
|
Converts a GEIS science file and its corresponding
data quality file (if present) to MEF format
Writes out both files to disk.
Returns the new name of the science image.
def waiver2mef(sciname, newname=None, convert_dq=True, writefits=True):
    """
    Converts a WAIVERED FITS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.

    :param sciname: filename (or HDUList) of the waivered science image.
    :param newname: unused; retained for interface compatibility.
    :param convert_dq: if True, also convert the associated _c1f data
        quality file when it exists on disk.
    :param writefits: if True, write the MEF file(s) to disk and return an
        updatable handle to the new science file.
    :return: HDUList handle to the converted science image opened in update
        mode (note: a handle, not a filename), or None if the input could
        not be found.
    """
    if isinstance(sciname, fits.HDUList):
        filename = sciname.filename()
    else:
        filename = sciname
    try:
        clobber = True
        fimg = convertwaiveredfits.convertwaiveredfits(filename)
        # check for the existence of a data quality file
        _dqname = fileutil.buildNewRootname(filename, extn='_c1f.fits')
        dqexists = os.path.exists(_dqname)
        # Bug fix: track DQ conversion success explicitly. Previously
        # dqfile/dqfitsname could be referenced below while unbound
        # (NameError) when the DQ conversion failed or when convert_dq
        # was False while the DQ file existed.
        dqconverted = False
        if convert_dq and dqexists:
            try:
                dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
                dqfitsname = fileutil.buildNewRootname(_dqname, extn='_c1h.fits')
                dqconverted = True
            except Exception:
                print("Could not read data quality file %s" % _dqname)
        if writefits:
            # User wants to make a FITS copy and update it
            # using the filename they have provided
            rname = fileutil.buildNewRootname(filename)
            fitsname = fileutil.buildNewRootname(rname, extn='_c0h.fits')
            # Write out GEIS image as multi-extension FITS.
            fexists = os.path.exists(fitsname)
            if (fexists and clobber) or not fexists:
                print('Writing out WAIVERED as MEF to ', fitsname)
                if ASTROPY_VER_GE13:
                    fimg.writeto(fitsname, overwrite=clobber)
                else:
                    fimg.writeto(fitsname, clobber=clobber)
                if dqconverted:
                    print('Writing out WAIVERED as MEF to ', dqfitsname)
                    if ASTROPY_VER_GE13:
                        dqfile.writeto(dqfitsname, overwrite=clobber)
                    else:
                        dqfile.writeto(dqfitsname, clobber=clobber)
            # Now close input GEIS image, and open writable
            # handle to output FITS image instead...
            fimg.close()
            del fimg
            fimg = fits.open(fitsname, mode='update', memmap=False)
        return fimg
    except IOError:
        print('Warning: File %s could not be found' % sciname)
        return None
|
Converts a GEIS science file and its corresponding
data quality file (if present) to MEF format
Writes out both files to disk.
Returns the new name of the science image.
def geis2mef(sciname, convert_dq=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.

    :param sciname: the GEIS science filename.
    :param convert_dq: if True, also convert the .c1h data quality file
        when it exists on disk.
    :return: HDUList handle to the converted science image opened in update
        mode (note: a handle, not a filename).
    :raises IOError: if the GEIS input cannot be opened.
    """
    clobber = True
    mode = 'update'
    memmap = True
    # Input was specified as a GEIS image, but no FITS copy
    # exists. Read it in with 'readgeis' and make a copy
    # then open the FITS copy...
    try:
        # Open as a GEIS image for reading only
        fimg = readgeis.readgeis(sciname)
    except Exception:
        raise IOError("Could not open GEIS input: %s" % sciname)
    # check for the existence of a data quality file
    _dqname = fileutil.buildNewRootname(sciname, extn='.c1h')
    dqexists = os.path.exists(_dqname)
    # Bug fix: track DQ conversion success explicitly. Previously
    # dqfile/dqfitsname stayed unbound when reading the DQ file failed,
    # raising NameError below; convert_dq was also silently ignored.
    dqconverted = False
    if convert_dq and dqexists:
        try:
            dqfile = readgeis.readgeis(_dqname)
            dqfitsname = fileutil.buildFITSName(_dqname)
            dqconverted = True
        except Exception:
            print("Could not read data quality file %s" % _dqname)
    # Check to see if user wanted to update GEIS header.
    # or write out a multi-extension FITS file and return a handle to it
    # User wants to make a FITS copy and update it
    # using the filename they have provided
    fitsname = fileutil.buildFITSName(sciname)
    # Write out GEIS image as multi-extension FITS.
    fexists = os.path.exists(fitsname)
    if (fexists and clobber) or not fexists:
        print('Writing out GEIS as MEF to ', fitsname)
        if ASTROPY_VER_GE13:
            fimg.writeto(fitsname, overwrite=clobber)
        else:
            fimg.writeto(fitsname, clobber=clobber)
        if dqconverted:
            print('Writing out GEIS as MEF to ', dqfitsname)
            if ASTROPY_VER_GE13:
                dqfile.writeto(dqfitsname, overwrite=clobber)
            else:
                dqfile.writeto(dqfitsname, clobber=clobber)
    # Now close input GEIS image, and open writable
    # handle to output FITS image instead...
    fimg.close()
    del fimg
    fimg = fits.open(fitsname, mode=mode, memmap=memmap)
    return fimg
|
Retrieve journals attribute for this very Issue
def journals(self):
    """
    Retrieve journals attribute for this very Issue
    """
    # Best-effort: any failure (network, parsing, missing keys) yields [].
    try:
        raw = self._redmine.get(self._item_path % str(self.id),
                                parms={'include': 'journals'})
        parsed = self._redmine.unwrap_json(None, raw)
        return [Journal(redmine=self._redmine,
                        data=entry,
                        type='issue_journal')
                for entry in parsed['issue']['journals']]
    except Exception:
        return []
|
Save all changes back to Redmine with optional notes.
def save(self, notes=None):
    '''Save all changes back to Redmine with optional notes.'''
    # Record the notes (if any) alongside the pending changes, then
    # delegate the actual save to the base class.
    if notes:
        self._changes['notes'] = notes
    super(Issue, self).save()
|
Save all changes and set to the given new_status
def set_status(self, new_status, notes=None):
    '''Save all changes and set to the given new_status

    :param new_status: the status id to set.
    :param notes: optional notes passed through to save().
    '''
    self.status_id = new_status
    try:
        # keep the cached status dict in sync when present
        self.status['id'] = self.status_id
        # We don't have the id to name mapping, so blank the name
        self.status['name'] = None
    # Bug fix: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; self.status may simply be missing or not dict-like.
    except Exception:
        pass
    self.save(notes)
|
Save all changes and resolve this issue
def resolve(self, notes=None):
    '''Save all changes and resolve this issue'''
    resolved_id = self._redmine.ISSUE_STATUS_ID_RESOLVED
    self.set_status(resolved_id, notes=notes)
|
Save all changes and close this issue
def close(self, notes=None):
    '''Save all changes and close this issue'''
    closed_id = self._redmine.ISSUE_STATUS_ID_CLOSED
    self.set_status(closed_id, notes=notes)
|
Return an object derived from the given json data.
def _objectify(self, json_data=None, data=None):
    '''Return an object derived from the given json data.

    :param json_data: raw JSON string to parse; raises RedmineError if it
        cannot be parsed (the string likely contains a server error message).
    :param data: pre-parsed dict to use when json_data is not given.
    '''
    # Bug fix: the default used to be a shared mutable dict (data={});
    # this method mutates data below, which would corrupt the shared default.
    if data is None:
        data = {}
    if json_data:
        # Parse the data
        try:
            data = json.loads(json_data)
        except ValueError:
            # If parsing failed, then raise the string which likely
            # contains an error message instead of data
            raise RedmineError(json_data)
    # Check to see if there is a data wrapper
    # Some replies will have {'issue':{<data>}} instead of just {<data>}
    try:
        data = data[self._item_type]
    except KeyError:
        pass
    # If there's no ID but a source path
    if ('id' not in data) and ('_source_path' in data):
        # use the path between /projects/ and .json as the ID
        data['id'] = data['_source_path']\
            .partition('/projects/')[2]\
            .partition('.json')[0]
    # Call the base class objectify method
    return super(Redmine_Wiki_Pages_Manager, self)._objectify(data=data)
|
Create a new item with the provided dict information
at the given page_name. Returns the new item.
As of version 2.2 of Redmine, this doesn't seem to function.
def new(self, page_name, **attributes):
    '''
    Create a new item with the provided dict information
    at the given page_name. Returns the new item.
    As of version 2.2 of Redmine, this doesn't seem to function.
    '''
    # (kwargs renamed from **dict, which shadowed the builtin; callers
    # pass keyword arguments, so the collecting name is not part of the API)
    self._item_new_path = '/projects/%s/wiki/%s.json' % \
        (self._project.identifier, page_name)
    # Call the base class new method
    return super(Redmine_Wiki_Pages_Manager, self).new(**attributes)
|
Set up this object based on the capabilities of the
known versions of Redmine
def _set_version(self, version):
'''
Set up this object based on the capabilities of the
known versions of Redmine
'''
# Store the version we are evaluating
self.version = version or None
# To evaluate the version capabilities,
# assume the best-case if no version is provided.
version_check = version or 9999.0
if version_check < 1.0:
raise RedmineError('This library will only work with '
'Redmine version 1.0 and higher.')
## SECURITY AUGMENTATION
# All versions support the key in the request
# (http://server/stuff.json?key=blah)
# But versions 1.1 and higher can put the key in a header field
# for better security.
# If no version was provided (0.0) then assume we should
# set the key with the request.
self.key_in_header = version >= 1.1
# it puts the key in the header or
# it gets the hose, but not for 1.0.
self.impersonation_supported = version_check >= 2.2
self.has_project_memberships = version_check >= 1.4
self.has_project_versions = version_check >= 1.3
self.has_wiki_pages = version_check >= 2.2
## ITEM MANAGERS
# Step through all the item managers by version
# and instatiate and item manager for that item.
for manager_version in self._item_managers_by_version:
if version_check >= manager_version:
managers = self._item_managers_by_version[manager_version]
for attribute_name, item in managers.iteritems():
setattr(self, attribute_name,
Redmine_Items_Manager(self, item))
|
extracts a new TargetState object from the specified configuration
:param targetStateConfig: the config dict.
:param existingTargetState: the existing state
:return:
def loadTargetState(targetStateConfig, existingTargetState=None):
    """
    extracts a new TargetState object from the specified configuration
    :param targetStateConfig: the config dict.
    :param existingTargetState: the existing state to update in place, or
        None to create a fresh TargetState.
    :return: the target state.
    """
    targetState = existingTargetState
    if targetState is None:
        # import lazily: TargetState is only needed when no existing state
        # is supplied
        from analyser.common.targetstatecontroller import TargetState
        targetState = TargetState()
    # FIXFIX validate
    if targetStateConfig is not None:
        # each config key maps 1:1 onto a TargetState attribute; only
        # overwrite attributes that are actually present in the config
        for attr in ('fs', 'samplesPerBatch', 'gyroEnabled', 'gyroSens',
                     'accelerometerEnabled', 'accelerometerSens'):
            val = targetStateConfig.get(attr)
            if val is not None:
                setattr(targetState, attr, val)
    return targetState
|
Converts input bit flags to a single integer value (bitmask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma- or '+'-separated list of flags), the returned bitmask
is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bitmask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bitmask or a Python list of bit flags, set
`flip_bits` for `True` in order to flip the bits of the returned
bitmask.
Parameters
----------
bit_flags : int, str, list, None
An integer bitmask or flag, `None`, a string of comma- or
'+'-separated list of integer bit flags, or a Python list of integer
bit flags. If `bit_flags` is a `str` and if it is prepended with '~',
then the output bitmask will have its bits flipped (compared to simple
sum of input flags). For input `bit_flags` that is already a bitmask
or a Python list of bit flags, bit-flipping can be controlled through
`flip_bits` parameter.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bitmask
obtained from input bit flags. This parameter must be set to `None`
when input `bit_flags` is either `None` or a Python list of flags.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value
or `None` if input `bit_flags` parameter is `None` or an empty string.
If input string value was prepended with '~' (or `flip_bits` was
set to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from stsci.tools.bitmask import interpret_bit_flags
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
def interpret_bit_flags(bit_flags, flip_bits=None):
    """
    Converts input bit flags to a single integer value (bitmask) or `None`.
    When input is a list of flags (either a Python list of integer flags or a
    string of comma- or '+'-separated list of flags), the returned bitmask
    is obtained by summing input flags.
    .. note::
        In order to flip the bits of the returned bitmask,
        for input of `str` type, prepend '~' to the input string. '~' must
        be prepended to the *entire string* and not to each bit flag! For
        input that is already a bitmask or a Python list of bit flags, set
        `flip_bits` for `True` in order to flip the bits of the returned
        bitmask.
    Parameters
    ----------
    bit_flags : int, str, list, None
        An integer bitmask or flag, `None`, a string of comma- or
        '+'-separated list of integer bit flags, or a Python list of integer
        bit flags. If `bit_flags` is a `str` and if it is prepended with '~',
        then the output bitmask will have its bits flipped (compared to simple
        sum of input flags). For input `bit_flags` that is already a bitmask
        or a Python list of bit flags, bit-flipping can be controlled through
        `flip_bits` parameter.
    flip_bits : bool, None
        Indicates whether or not to flip the bits of the returned bitmask
        obtained from input bit flags. This parameter must be set to `None`
        when input `bit_flags` is either `None` or a Python list of flags.
    Returns
    -------
    bitmask : int or None
        Returns an integer bit mask formed from the input bit value
        or `None` if input `bit_flags` parameter is `None` or an empty string.
        If input string value was prepended with '~' (or `flip_bits` was
        set to `True`), then returned value will have its bits flipped
        (inverse mask).
    Examples
    --------
    >>> from stsci.tools.bitmask import interpret_bit_flags
    >>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
    '0000000000011100'
    >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
    '0000000000011100'
    >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
    '1111111111100011'
    >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
    '1111111111100011'
    >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
    '0000000000011100'
    >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
    '1111111111100011'
    """
    # remember whether the caller explicitly passed flip_bits (None = unset);
    # explicit use is only permitted for int/list inputs
    has_flip_bits = flip_bits is not None
    flip_bits = bool(flip_bits)
    allow_non_flags = False
    if _is_int(bit_flags):
        # a single integer is already a bitmask; just flip if requested
        return (~int(bit_flags) if flip_bits else int(bit_flags))
    elif bit_flags is None:
        if has_flip_bits:
            raise TypeError(
                "Keyword argument 'flip_bits' must be set to 'None' when "
                "input 'bit_flags' is None."
            )
        return None
    elif isinstance(bit_flags, six.string_types):
        if has_flip_bits:
            raise TypeError(
                "Keyword argument 'flip_bits' is not permitted for "
                "comma-separated string lists of bit flags. Prepend '~' to "
                "the string to indicate bit-flipping."
            )
        bit_flags = str(bit_flags).strip()
        if bit_flags.upper() in ['', 'NONE', 'INDEF']:
            return None
        # check whether bitwise-NOT is present and if it is, check that it is
        # in the first position:
        bitflip_pos = bit_flags.find('~')
        if bitflip_pos == 0:
            flip_bits = True
            bit_flags = bit_flags[1:].lstrip()
        else:
            if bitflip_pos > 0:
                raise ValueError("Bitwise-NOT must precede bit flag list.")
            flip_bits = False
        # basic check for correct use of parenthesis:
        # repeatedly strip one balanced outer pair of parentheses
        while True:
            nlpar = bit_flags.count('(')
            nrpar = bit_flags.count(')')
            if nlpar == 0 and nrpar == 0:
                break
            if nlpar != nrpar:
                raise ValueError("Unbalanced parantheses in bit flag list.")
            lpar_pos = bit_flags.find('(')
            rpar_pos = bit_flags.rfind(')')
            if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
                raise ValueError("Incorrect syntax (incorrect use of "
                                 "parenthesis) in bit flag list.")
            bit_flags = bit_flags[1:-1].strip()
        # split on either separator; a single bare token may be a full
        # bitmask rather than a power-of-two flag
        if ',' in bit_flags:
            bit_flags = bit_flags.split(',')
        elif '+' in bit_flags:
            bit_flags = bit_flags.split('+')
        else:
            if bit_flags == '':
                raise ValueError(
                    "Empty bit flag lists not allowed when either bitwise-NOT "
                    "or parenthesis are present."
                )
            bit_flags = [bit_flags]
        allow_non_flags = len(bit_flags) == 1
    elif hasattr(bit_flags, '__iter__'):
        if not all([_is_int(flag) for flag in bit_flags]):
            raise TypeError("Each bit flag in a list must be an integer.")
    else:
        raise TypeError("Unsupported type for argument 'bit_flags'.")
    # de-duplicate the flags; duplicates are dropped with a warning
    bitset = set(map(int, bit_flags))
    if len(bitset) != len(bit_flags):
        warnings.warn("Duplicate bit flags will be ignored")
    bitmask = 0
    for v in bitset:
        # every entry must be a power of two, unless a single bare integer
        # (already a full bitmask) was provided
        if not is_bit_flag(v) and not allow_non_flags:
            raise ValueError("Input list contains invalid (not powers of two) "
                             "bit flags")
        bitmask += v
    if flip_bits:
        bitmask = ~bitmask
    return bitmask
|
bitfield_to_boolean_mask(bitfield, ignore_flags=None, flip_bits=None, \
good_mask_value=True, dtype=numpy.bool\_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bitmask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : numpy.ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (Default = 0)
An integer bitmask, a Python list of bit flags, a comma- or
'+'-separated string list of integer bit flags that indicate what
bits in the input ``bitfield`` should be *ignored* (i.e., zeroed), or
`None`.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bitmask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bitmask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 effectively will assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is an Python list of integer bit
flags, these flags are added together to create an integer bitmask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bitmask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
'+'-separated list of integer bit flags that should be added together
to create an integer bitmask. For example, both ``'4,8'`` and
``'4+8'`` are equivalent and indicate that bit flags 4 and 8 in
the input ``bitfield`` array should be ignored when generating
boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bitmask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (Default = None)
Specifies whether or not to invert the bits of the bitmask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposite to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bitmask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bitmask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (Default = True)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or `True` then values in the output
boolean mask array corresponding to "good" bit fields in ``bitfield``
will be `True` (if ``dtype`` is `numpy.bool_`) or 1 (if ``dtype`` is
of numerical type) and values of corresponding to "bad" flags will be
`False` (or 0). When ``good_mask_value`` is zero or `False` then the
values in the output boolean mask array corresponding to "good" bit
fields in ``bitfield`` will be `False` (if ``dtype`` is `numpy.bool_`)
or 0 (if ``dtype`` is of numerical type) and values of corresponding
to "bad" flags will be `True` (or 1).
dtype : data-type (Default = numpy.bool\_)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., `True` or `False` (or 1 or 0 for integer ``dtype``) according to
values of to the input ``bitfield`` elements, ``ignore_flags``
parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from stsci.tools import bitmask
>>> import numpy as np
>>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=int)
array([[1, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 0, 1]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=bool)
array([[ True, True, False, False, True, False, False, True],
[False, False, True, True, True, False, False, True]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=~6, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, flip_bits=True, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags='~(2+4)', good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=[2, 4], flip_bits=True, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
def bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None,
                             good_mask_value=True, dtype=np.bool_):
    """
    bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=True, dtype=numpy.bool\_)

    Convert an array of bit fields to a boolean (or integer) mask array
    according to a bitmask constructed from the supplied bit flags (see
    ``ignore_flags``). Particularly useful to convert data quality arrays
    to boolean masks with selective filtering of DQ flags.

    Parameters
    ----------
    bitfield : numpy.ndarray
        An array of bit flags. By default, non-zero values are interpreted
        as "bad" and zero values as "good". See ``ignore_flags`` on how to
        selectively ignore some bits in the ``bitfield`` data.
    ignore_flags : int, str, list, None (Default = 0)
        Specifies which bits in ``bitfield`` should be *ignored* (i.e.,
        zeroed) when deciding what is "bad":

        * `None` - interpret **all** ``bitfield`` elements as "good";
        * `int` - an integer bitmask combined with each ``bitfield``
          element as ``~ignore_flags & bitfield``; a non-zero result marks
          the element "bad" (so ``ignore_flags=0`` means every non-zero
          element is "bad");
        * `list` - integer bit flags (each an integer power of 2) that are
          added together to form the bitmask;
        * `str` - a comma- or '+'-separated list of integer bit flags
          (``'4,8'`` and ``'4+8'`` are equivalent) added together to form
          the bitmask. For convenience a **single** integer (e.g. ``'12'``)
          is interpreted as an integer bitmask. The special strings
          ``'None'``, ``'INDEF'`` and empty (or all-whitespace) strings are
          interpreted as `None`. Prepending '~' to the whole string
          reverses the meaning from "ignore" to "use only" these flags
          (thus ``'~0'`` is equivalent to `None`); because '~' is
          equivalent to ``flip_bits=True``, ``flip_bits`` must then be
          left as `None`.
    flip_bits : bool, None (Default = None)
        Whether to invert (bitwise-NOT) the bitmask built from
        ``ignore_flags``, effectively changing its meaning from "ignore"
        to "use only" these flags. May be `True`/`False` **only** when
        ``ignore_flags`` is an integer bitmask or a Python list of integer
        bit flags; when ``ignore_flags`` is `None` or a string it **must**
        be `None` (use the '~' string prefix instead).
    good_mask_value : int, bool (Default = True)
        The value assigned in the output mask to "good" elements (those
        that are 0 after zeroing the bits given by ``ignore_flags``);
        "bad" elements receive the opposite value.
    dtype : data-type (Default = numpy.bool\_)
        The desired data-type for the output binary mask array.

    Returns
    -------
    mask : numpy.ndarray
        An array of the same dimensionality as ``bitfield`` whose elements
        take one of two possible values (e.g. `True`/`False`, or 1/0 for
        integer ``dtype``) according to the input ``bitfield`` elements,
        ``ignore_flags``, and ``good_mask_value``.

    Examples
    --------
    >>> from stsci.tools import bitmask
    >>> import numpy as np
    >>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]])
    >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=int)
    array([[1, 1, 0, 0, 1, 0, 0, 1],
           [0, 0, 1, 1, 1, 0, 0, 1]])
    >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6,
    ...                                  good_mask_value=0, dtype=int)
    array([[0, 0, 1, 0, 0, 1, 1, 0],
           [1, 0, 0, 0, 0, 1, 0, 0]])
    >>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags='~(2+4)',
    ...                                  good_mask_value=0, dtype=int)
    array([[0, 0, 0, 1, 0, 0, 1, 0],
           [1, 1, 0, 0, 0, 0, 1, 0]])
    """
    bitfield = np.asarray(bitfield)
    if not np.issubdtype(bitfield.dtype, np.integer):
        raise TypeError("Input bitfield array must be of integer type.")

    ignore_mask = interpret_bit_flags(ignore_flags, flip_bits=flip_bits)

    if ignore_mask is None:
        # Nothing is considered "bad": the whole mask takes the "good" value.
        fill = np.ones_like if good_mask_value else np.zeros_like
        return fill(bitfield, dtype=dtype)

    # Drop bits beyond what the data type supports, then invert so that set
    # bits select the flags that are *kept* when testing for "bad" values.
    keep_mask = np.bitwise_not(ignore_mask & SUPPORTED_FLAGS,
                               dtype=bitfield.dtype, casting='unsafe')

    mask = np.empty_like(bitfield, dtype=np.bool_)
    np.bitwise_and(bitfield, keep_mask, out=mask, casting='unsafe')

    if good_mask_value:
        np.logical_not(mask, out=mask)

    return mask.astype(dtype=dtype, subok=False, copy=False)
|
Converts input bits value from string to a single integer value or None.
If a comma- or '+'-separated set of values are provided, they are summed.
.. note::
In order to flip the bits of the final result (after summation),
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag!
Parameters
----------
val : int, str, None
An integer bit mask or flag, `None`, or a comma- or '+'-separated
string list of integer bit values. If `val` is a `str` and if
it is prepended with '~', then the output bit mask will have its
bits flipped (compared to simple sum of input val).
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value
or `None` if input `val` parameter is `None` or an empty string.
If input string value was prepended with '~', then returned
value will have its bits flipped (inverse mask).
def interpret_bits_value(val):
    """
    Converts input bits value from string to a single integer value or None.
    If a comma- or '+'-separated set of values are provided, they are summed.

    .. note::
        In order to flip the bits of the final result (after summation),
        for input of `str` type, prepend '~' to the input string. '~' must
        be prepended to the *entire string* and not to each bit flag!

    Parameters
    ----------
    val : int, str, None
        An integer bit mask or flag, `None`, or a comma- or '+'-separated
        string list of integer bit values. If `val` is a `str` and if
        it is prepended with '~', then the output bit mask will have its
        bits flipped (compared to simple sum of input val).

    Returns
    -------
    bitmask : int or None
        Returns an integer bit mask formed from the input bit value
        or `None` if input `val` parameter is `None` or an empty string.
        If input string value was prepended with '~', then returned
        value will have its bits flipped (inverse mask).
    """
    # Integers (and None) pass straight through.
    if isinstance(val, int) or val is None:
        return val

    val = str(val).strip()

    # A leading '~' (on the *whole* string) requests bit inversion of the
    # final summed mask.
    flip_bits = val.startswith('~')
    if flip_bits:
        val = val[1:].lstrip()

    # The entire flag list may optionally be enclosed in parentheses.
    if val.startswith('('):
        if not val.endswith(')'):
            raise ValueError('Unbalanced parentheses or incorrect syntax.')
        val = val[1:-1].strip()

    # Special string values that mean "no bitmask".
    if val.upper() in ('', 'NONE', 'INDEF'):
        return None

    # Accept ',' and '+' as flag separators (even mixed, e.g. '4,8+16')
    # and sum the listed flags into a single integer bitmask.
    bitmask = sum(int(v) for v in val.replace(',', '+').split('+'))

    return ~bitmask if flip_bits else bitmask
|
bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.bool\_)
Interprets an array of bit flags and converts it to a "binary" mask array.
This function is particularly useful to convert data quality arrays to
binary masks.
Parameters
----------
bitmask : numpy.ndarray
An array of bit flags. Values different from zero are interpreted as
"bad" values and values equal to zero are considered as "good" values.
However, see `ignore_bits` parameter on how to ignore some bits
in the `bitmask` array.
ignore_bits : int, str, None
An integer bit mask, `None`, or a comma- or '+'-separated
string list of integer bit values that indicate what bits in the
input `bitmask` should be *ignored* (i.e., zeroed). If `ignore_bits`
is a `str` and if it is prepended with '~', then the meaning
of `ignore_bits` parameters will be reversed: now it will be
interpreted as a list of bits to be *used* (or *not ignored*) when
deciding what elements of the input `bitmask` array are "bad".
The `ignore_bits` parameter is the integer sum of all of the bit
values from the input `bitmask` array that should be considered
"good" when creating the output binary mask. For example, if
values in the `bitmask` array can be combinations
of 1, 2, 4, and 8 flags and one wants to consider that
values having *only* bit flags 2 and/or 4 as being "good",
then `ignore_bits` should be set to 2+4=6. Then a `bitmask` element
having values 2,4, or 6 will be considered "good", while an
element with a value, e.g., 1+2=3, 4+8=12, etc. will be interpreted
as "bad".
Alternatively, one can enter a comma- or '+'-separated list
of integer bit flags that should be added to obtain the
final "good" bits. For example, both ``4,8`` and ``4+8``
are equivalent to setting `ignore_bits` to 12.
See :py:func:`interpret_bits_value` for examples.
| Setting `ignore_bits` to `None` effectively will interpret
all `bitmask` elements as "good" regardless of their value.
| Setting `ignore_bits` to 0 effectively will assume that all
non-zero elements in the input `bitmask` array are to be
interpreted as "bad".
| In order to reverse the meaning of the `ignore_bits`
parameter from indicating bits in the values of `bitmask`
elements that should be ignored when deciding which elements
are "good" (these are the elements that are zero after ignoring
`ignore_bits`), to indicating the bits should be used
exclusively in deciding whether a `bitmask` element is "good",
prepend '~' to the string value. For example, in order to use
**only** (or **exclusively**) flags 4 and 8 (2nd and 3rd bits)
in the values of the input `bitmask` array when deciding whether
or not that element is "good", set `ignore_bits` to ``~4+8``,
or ``~4,8``. To obtain the same effect with an `int` input value
(except for 0), enter -(4+8+1)=-9. Following this convention,
a `ignore_bits` string value of ``'~0'`` would be equivalent to
setting ``ignore_bits=None``.
good_mask_value : int, bool (Default = 1)
This parameter is used to derive the values that will be assigned to
the elements in the output `mask` array that correspond to the "good"
flags (that are 0 after zeroing bits specified by `ignore_bits`)
in the input `bitmask` array. When `good_mask_value` is non-zero or
`True` then values in the output mask array corresponding to "good"
bit flags in `bitmask` will be 1 (or `True` if `dtype` is `bool`) and
values of corresponding to "bad" flags will be 0. When
`good_mask_value` is zero or `False` then values in the output mask
array corresponding to "good" bit flags in `bitmask` will be 0
(or `False` if `dtype` is `bool`) and values of corresponding
to "bad" flags will be 1.
dtype : data-type (Default = numpy.bool\_)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array whose elements can have two possible values,
e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according to
values of to the input `bitmask` elements, `ignore_bits` parameter,
and the `good_mask_value` parameter.
def bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=np.bool_):
    """
    bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.bool\_)

    Interprets an array of bit flags and converts it to a "binary" mask
    array. This function is particularly useful to convert data quality
    arrays to binary masks.

    Parameters
    ----------
    bitmask : numpy.ndarray
        An array of bit flags (any array-like of integer type is accepted).
        Values different from zero are interpreted as "bad" values and
        values equal to zero are considered as "good" values. However, see
        `ignore_bits` parameter on how to ignore some bits in the `bitmask`
        array.
    ignore_bits : int, str, None
        An integer bit mask, `None`, or a comma- or '+'-separated string
        list of integer bit values that indicate what bits in the input
        `bitmask` should be *ignored* (i.e., zeroed). The `ignore_bits`
        parameter is the integer sum of all of the bit values from the
        input `bitmask` array that should be considered "good" when
        creating the output binary mask. For example, if values in the
        `bitmask` array can be combinations of 1, 2, 4, and 8 flags and one
        wants to consider values having *only* bit flags 2 and/or 4 as
        "good", then `ignore_bits` should be set to 2+4=6. Then a `bitmask`
        element with value 2, 4, or 6 is "good", while an element with
        value, e.g., 1+2=3 or 4+8=12 is "bad".
        Alternatively, one can enter a comma- or '+'-separated list of
        integer bit flags that should be added to obtain the final "good"
        bits: both ``4,8`` and ``4+8`` are equivalent to setting
        `ignore_bits` to 12. See :py:func:`interpret_bits_value` for
        examples.

        | Setting `ignore_bits` to `None` effectively will interpret
          all `bitmask` elements as "good" regardless of their value.
        | Setting `ignore_bits` to 0 effectively will assume that all
          non-zero elements in the input `bitmask` array are to be
          interpreted as "bad".
        | If `ignore_bits` is a `str` prepended with '~', its meaning is
          reversed: it is interpreted as the list of bits to be *used*
          (or *not ignored*) when deciding which elements are "bad". For
          example, to use **only** flags 4 and 8, set `ignore_bits` to
          ``'~4+8'`` or ``'~4,8'``. To obtain the same effect with an
          `int` input value (except for 0), enter -(4+8+1)=-13... wait;
          see note below. Following this convention, an `ignore_bits`
          string value of ``'~0'`` is equivalent to
          ``ignore_bits=None``.
    good_mask_value : int, bool (Default = 1)
        The value assigned to elements of the output mask that correspond
        to "good" flags (those that are 0 after zeroing the bits specified
        by `ignore_bits`) in the input `bitmask` array; "bad" elements
        receive the opposite value. E.g., with a non-zero/`True`
        `good_mask_value`, "good" elements are 1 (or `True` for a `bool`
        `dtype`) and "bad" elements are 0.
    dtype : data-type (Default = numpy.bool\_)
        The desired data-type for the output binary mask array.

    Returns
    -------
    mask : numpy.ndarray
        Returns an array whose elements can have two possible values,
        e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according
        to values of the input `bitmask` elements, the `ignore_bits`
        parameter, and the `good_mask_value` parameter.
    """
    # Accept any array-like input, consistent with bitfield_to_boolean_mask.
    bitmask = np.asarray(bitmask)
    if not np.issubdtype(bitmask.dtype, np.integer):
        raise TypeError("Input 'bitmask' array must be of integer type.")

    ignore_bits = interpret_bits_value(ignore_bits)

    if ignore_bits is None:
        # Nothing is considered "bad": whole mask takes the "good" value.
        if good_mask_value:
            mask = np.ones_like(bitmask, dtype=dtype)
        else:
            mask = np.zeros_like(bitmask, dtype=dtype)
        return mask

    # Invert so that set bits select the flags *kept* when testing for
    # "bad" values, using the same integer type as the input array.
    ignore_bits = ~bitmask.dtype.type(ignore_bits)

    mask = np.empty_like(bitmask, dtype=np.bool_)
    np.bitwise_and(bitmask, ignore_bits, out=mask, casting='unsafe')

    if good_mask_value:
        np.logical_not(mask, out=mask)

    return mask.astype(dtype=dtype, subok=False, copy=False)
|
Format data to get a readable output.
def output_text(account, all_data, show_hourly=False):
"""Format data to get a readable output."""
print("""
#################################
# Hydro Quebec data for account #
# {}
#################################""".format(account))
for contract, data in all_data.items():
data['contract'] = contract
if data['period_total_bill'] is None:
data['period_total_bill'] = 0.0
if data['period_projection'] is None:
data['period_projection'] = 0.0
if data['period_mean_daily_bill'] is None:
data['period_mean_daily_bill'] = 0.0
output = ("""
----------------------------------------------------------------
Contract: {d[contract]}
===================
Balance: {d[balance]:.2f} $
Period Info
===========
Period day number: {d[period_length]:d}
Period total days: {d[period_total_days]:d} days
Period current bill
===================
Total Bill: {d[period_total_bill]:.2f} $
Projection bill: {d[period_projection]:.2f} $
Mean Daily Bill: {d[period_mean_daily_bill]:.2f} $
Total period consumption
========================
Lower price: {d[period_lower_price_consumption]:.2f} kWh
Higher price: {d[period_higher_price_consumption]:.2f} kWh
Total: {d[period_total_consumption]:.2f} kWh
Mean daily: {d[period_mean_daily_consumption]:.2f} kWh""")
print(output.format(d=data))
if data.get("period_average_temperature") is not None:
output2 = ("""Temperature: {d[period_average_temperature]:d} °C""")
print(output2.format(d=data))
if data.get("yesterday_average_temperature") is not None:
output3 = ("""
Yesterday consumption
=====================
Temperature: {d[yesterday_average_temperature]:d} °C
Lower price: {d[yesterday_lower_price_consumption]:.2f} kWh
Higher price: {d[yesterday_higher_price_consumption]:.2f} kWh
Total: {d[yesterday_total_consumption]:.2f} kWh""")
print(output3.format(d=data))
if show_hourly:
msg = ("""
Yesterday consumption details
-----------------------------
Hour | Temperature | Lower price consumption | Higher price consumption | total comsumption
""")
for hdata in data['yesterday_hourly_consumption']:
msg += ("{d[hour]} | {d[temp]:8d} °C | {d[lower]:19.2f} kWh | "
"{d[high]:20.2f} kWh | {d[total]:.2f} kWh\n").format(d=hdata)
print(msg)
if data['annual_total_bill']:
output3 = ("""
Annual Total
============
Start date: {d[annual_date_start]}
End date: {d[annual_date_end]}
Total bill: {d[annual_total_bill]} $
Mean daily bill: {d[annual_mean_daily_bill]} $
Total consumption: {d[annual_total_consumption]} kWh
Mean dailyconsumption: {d[annual_mean_daily_consumption]} kWh
kWh price: {d[annual_kwh_price_cent]} ¢
""")
print(output3.format(d=data))
|
Print data using influxDB format.
def output_influx(data):
    """Print data using influxDB line-protocol format.

    Args:
        data: Mapping of contract id -> contract data dict. Each dict must
            contain a 'yesterday_hourly_consumption' list of hourly dicts
            (removed from the dict as a side effect).
    """
    for contract in data:
        # Pop yesterday's data so it is not emitted with the general fields.
        yesterday_data = data[contract]['yesterday_hourly_consumption']
        del data[contract]['yesterday_hourly_consumption']

        # Print general data
        out = "pyhydroquebec,contract=" + contract + " "
        for index, key in enumerate(data[contract]):
            if index != 0:
                out = out + ","
            if key in ("annual_date_start", "annual_date_end"):
                # Dates must be quoted so InfluxDB stores them as strings.
                out += key + "=\"" + str(data[contract][key]) + "\""
            else:
                out += key + "=" + str(data[contract][key])
        out += " " + str(int(datetime.datetime.now(HQ_TIMEZONE).timestamp() * 1000000000))
        print(out)

        # Print yesterday values
        yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
        yesterday = yesterday.replace(minute=0, hour=0, second=0, microsecond=0)
        for hour in yesterday_data:
            msg = "pyhydroquebec,contract={} {} {}"
            # BUGFIX: the original rebound the *outer* 'data' variable here,
            # which broke the loop for accounts with more than one contract;
            # use a dedicated name for the joined field list instead.
            fields = ",".join(["{}={}".format(key, value)
                               for key, value in hour.items()
                               if key != 'hour'])
            hour_dt = datetime.datetime.strptime(hour['hour'], '%H:%M:%S')
            yesterday = yesterday.replace(hour=hour_dt.hour)
            yesterday_str = str(int(yesterday.timestamp() * 1000000000))
            print(msg.format(contract, fields, yesterday_str))
|
Convenience function. Returns one of ('x11', 'aqua') in answer to the
question of whether this is an X11-linked Python/tkinter, or a natively
built (framework, Aqua) one. This is only for OSX.
This relies on the assumption that on OSX, PyObjC is installed
in the Framework builds of Python. If it doesn't find PyObjC,
this inspects the actual tkinter library binary via otool.
One driving requirement here is to try to make the determination quickly
and quietly without actually importing/loading any GUI libraries. We
even want to avoid importing tkinter if we can.
def which_darwin_linkage(force_otool_check=False):
    """ Convenience function. Returns one of ('x11', 'aqua') in answer to the
    question of whether this is an X11-linked Python/tkinter, or a natively
    built (framework, Aqua) one. This is only for OSX.

    This relies on the assumption that on OSX, PyObjC is installed
    in the Framework builds of Python. If it doesn't find PyObjC,
    this inspects the actual tkinter library binary via otool.

    One driving requirement here is to try to make the determination quickly
    and quietly without actually importing/loading any GUI libraries. We
    even want to avoid importing tkinter if we can.

    May also return 'unknown' when the linkage cannot be determined
    (tkinter binary not inspectable and not a Ureka install).
    """
    # sanity check
    assert sys.platform=='darwin', 'Incorrect usage, not on OSX'
    # If not forced to run otool, then make some quick and dirty
    # simple checks/assumptions, which do not add to startup time and do not
    # attempt to initialize any graphics.
    if not force_otool_check:
        # There will (for now) only ever be an aqua-linked Python/tkinter
        # when using Ureka on darwin, so this is an easy short-circuit check.
        if 'UR_DIR' in os.environ:
            return "aqua"
        # There will *usually* be PyObjC modules on sys.path on the natively-
        # linked Python. This is assumed to be always correct on Python 2.x, as
        # of 2012. This is kludgy but quick and effective.
        sp = ",".join(sys.path)
        sp = sp.lower().strip(',')
        if '/pyobjc' in sp or 'pyobjc,' in sp or 'pyobjc/' in sp or sp.endswith('pyobjc'):
            return "aqua"
        # Try one more thing - look for the physical PyObjC install dir under
        # site-packages. The assumption above using sys.path does not seem to
        # be correct as of the combination of Python2.7.9/PyObjC3.0.4/2015.
        sitepacksloc = os.path.split(os.__file__)[0]+'/site-packages/objc'
        if os.path.exists(sitepacksloc):
            return "aqua"
        # OK, no trace of PyObjC found - fall through to the forced otool check.
    # Use otool shell command to inspect the tkinter extension's linkage.
    # NOTE(review): PY3K is presumably a module-level Python-3 flag -- confirm.
    if PY3K:
        import tkinter as TKNTR
    else:
        import Tkinter as TKNTR
    import subprocess
    try:
        # Path to the compiled _tkinter extension module.
        tk_dyn_lib = TKNTR._tkinter.__file__
    except AttributeError:  # happens on Ureka
        if 'UR_DIR' in os.environ:
            return 'aqua'
        else:
            return 'unknown'
    # If the extension links against libX11 it is an X11 build; otherwise
    # assume a native (Aqua/framework) build.
    libs = subprocess.check_output(('/usr/bin/otool', '-L', tk_dyn_lib)).decode('ascii')
    if libs.find('/libX11.') >= 0:
        return "x11"
    else:
        return "aqua"
|
Convenience function to return owner of /dev/console.
If raises is True, this raises an exception on any error.
If not, it returns any error string as the owner name.
If owner is self, and if mask_if_self, returns "<self>".
def get_dc_owner(raises, mask_if_self):
    """ Convenience function to return owner of /dev/console.
    If raises is True, this raises an exception on any error.
    If not, it returns any error string as the owner name.
    If owner is self, and if mask_if_self, returns "<self>"."""
    try:
        # pwd is Unix-only, so even the import stays inside the guard
        from pwd import getpwuid
        console_uid = os.stat('/dev/console').st_uid
        if mask_if_self and console_uid == os.getuid():
            return "<self>"
        return getpwuid(console_uid).pw_name
    except Exception as e:
        if raises:
            raise e
        return str(e)
|
Determine Julian day from Bahai date
def to_jd(year, month, day):
    '''Determine Julian day from Bahai date'''
    gy = year - 1 + EPOCH_GREGORIAN_YEAR
    # Only the final month (20) needs an offset, which depends on whether
    # the following Gregorian year is a leap year.
    if month == 20:
        m = -14 if isleap(gy + 1) else -15
    else:
        m = 0
    return gregorian.to_jd(gy, 3, 20) + 19 * (month - 1) + m + day
|
Calculate Bahai date from Julian day
def from_jd(jd):
    '''Calculate Bahai date from Julian day'''
    # Normalize to the half-day boundary used throughout these conversions
    jd = trunc(jd) + 0.5
    g = gregorian.from_jd(jd)
    gy = g[0]
    bstarty = EPOCH_GREGORIAN_YEAR
    # x flags whether jd falls on or before March 20 of this Gregorian year
    if jd <= gregorian.to_jd(gy, 3, 20):
        x = 1
    else:
        x = 0
    # verify this next line...
    # NOTE(review): '(gregorian.to_jd(gy, 1, 1) <= jd) and x' yields x (0/1)
    # when the comparison is True and False (0) otherwise; this relies on
    # bool/int arithmetic - confirm it matches the intended year-offset rule.
    bys = gy - (bstarty + (((gregorian.to_jd(gy, 1, 1) <= jd) and x)))
    year = bys + 1
    days = jd - to_jd(year, 1, 1)
    # Julian day of the first day of month 20 of this Bahai year
    bld = to_jd(year, 20, 1)
    if jd >= bld:
        month = 20
    else:
        # Months 1..19 are uniformly 19 days long
        month = trunc(days / 19) + 1
    day = int((jd + 1) - to_jd(year, month, 1))
    return year, month, day
|
Determine Julian day from Mayan long count
def to_jd(baktun, katun, tun, uinal, kin):
    '''Determine Julian day from Mayan long count'''
    # Place values of the long count positions, largest first
    units = (144000, 7200, 360, 20, 1)
    values = (baktun, katun, tun, uinal, kin)
    return EPOCH + sum(unit * value for unit, value in zip(units, values))
|
Calculate Mayan long count from Julian day
def from_jd(jd):
    '''Calculate Mayan long count from Julian day'''
    remainder = jd - EPOCH
    parts = []
    # Peel off each place value; trunc (not floor division) matches the
    # original toward-zero behavior for values below the epoch.
    for unit in (144000, 7200, 360, 20):
        parts.append(trunc(remainder / unit))
        remainder %= unit
    parts.append(int(remainder))
    return tuple(parts)
|
Determine Mayan Haab "month" and day from Julian day
def to_haab(jd):
    '''Determine Mayan Haab "month" and day from Julian day'''
    # Number of days since the start of the long count
    lcount = trunc(jd) + 0.5 - EPOCH
    # Long Count begins 348 days after the start of the cycle
    position = (lcount + 348) % 365
    month_index, count = divmod(position, 20)
    return int(count), HAAB_MONTHS[int(month_index)]
|
Determine Mayan Tzolkin "month" and day from Julian day
def to_tzolkin(jd):
    '''Determine Mayan Tzolkin "month" and day from Julian day'''
    # Number of days since the start of the long count
    lcount = trunc(jd) + 0.5 - EPOCH
    day_number = amod(lcount + 4, 13)
    name_position = amod(lcount + 20, 20)
    return int(day_number), TZOLKIN_NAMES[int(name_position) - 1]
|
Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365
def _haab_count(day, month):
    '''Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365'''
    if not 0 <= day <= 19:
        raise IndexError("Invalid day number")
    if month not in HAAB_MONTHS:
        raise ValueError("'{0}' is not a valid Haab' month".format(month))
    month_index = HAAB_MONTHS.index(month)
    # The final "month" is shorter, so its base offset is capped at 360
    return min(month_index * 20, 360) + day
|
For a given tzolkin name/number combination, return a generator
that gives cycle, starting with the input
def tzolkin_generator(number=None, name=None):
    '''For a given tzolkin name/number combination, return a generator
    that gives cycle, starting with the input'''
    # By default, start at 13 Ajaw (the end of the cycle)
    number = number or 13
    name = name or "Ajaw"
    if number > 13:
        raise ValueError("Invalid day number")
    if name not in TZOLKIN_NAMES:
        raise ValueError("Invalid day name")
    start = _tzolkin_count(number, name)
    # Walk the 260-day cycle once, wrapping around past position 259
    for position in itertools.chain(range(start, 260), range(1, start)):
        yield _tzolkin_from_count(position)
|
Generate long counts, starting with input
def longcount_generator(baktun, katun, tun, uinal, kin):
    '''Generate long counts, starting with input'''
    start = to_jd(baktun, katun, tun, uinal, kin)
    # Yield the long count for each successive Julian day, forever
    for jd in itertools.count(start):
        yield from_jd(jd)
|
For a given haab month and a julian day count, find the next start of that month on or after the JDC
def next_haab(month, jd):
    '''For a given haab month and a julian day count, find the next start of that month on or after the JDC'''
    if jd < EPOCH:
        raise IndexError("Input day is before Mayan epoch.")
    hday, hmonth = to_haab(jd)
    if hmonth == month:
        # NOTE(review): when jd is already inside the requested month this
        # steps back to day 1 of the *current* month (1 - hday <= 0 for
        # hday >= 1), which can return a day *before* jd despite the
        # docstring's "on or after" - confirm intended behavior.
        days = 1 - hday
    else:
        count1 = _haab_count(hday, hmonth)
        count2 = _haab_count(1, month)
        # Find number of days between haab of given jd and desired haab
        days = (count2 - count1) % 365
    # add in the number of days and return new jd
    return jd + days
|
For a given tzolk'in day, and a julian day count, find the next occurrence of that tzolk'in after the date
def next_tzolkin(tzolkin, jd):
    """For a given tzolk'in day, and a julian day count, find the next occurrence of that tzolk'in after the date"""
    if jd < EPOCH:
        raise IndexError("Input day is before Mayan epoch.")
    # Positions of the current and desired days within the 260-day cycle
    current = _tzolkin_count(*to_tzolkin(jd))
    target = _tzolkin_count(*tzolkin)
    return jd + (target - current) % 260
|
For a given haab-tzolk'in combination, and a Julian day count, find the next occurrence of the combination after the date
def next_tzolkin_haab(tzolkin, haab, jd):
    """For a given haab-tzolk'in combination, and a Julian day count, find the next occurrence of the combination after the date"""
    # Day offsets from jd to the desired haab and tzolk'in positions
    haab_offset = (_haab_count(*haab) - _haab_count(*to_haab(jd))) % 365
    tzolkin_offset = (_tzolkin_count(*tzolkin) - _tzolkin_count(*to_tzolkin(jd))) % 260
    # Candidate offsets within the 18,980-day calendar round for each cycle
    haab_candidates = set(h + haab_offset for h in range(0, 18980, 365))
    tzolkin_candidates = set(t + tzolkin_offset for t in range(0, 18980, 260))
    matches = tzolkin_candidates & haab_candidates
    if not matches:
        raise IndexError("That Haab'-Tzolk'in combination isn't possible")
    return matches.pop() + jd
|
For a given long count, return a calendar of the current haab month, divided into tzolkin "weeks"
def haab_monthcalendar(baktun=None, katun=None, tun=None, uinal=None, kin=None, jdc=None):
    '''For a given long count, return a calender of the current haab month, divided into tzolkin "weeks"'''
    # Accept either an explicit long count or a precomputed julian day count
    if not jdc:
        jdc = to_jd(baktun, katun, tun, uinal, kin)
    haab_number, haab_month = to_haab(jdc)
    # Julian day of the first day of the current haab month
    first_j = jdc - haab_number + 1
    tzolkin_start_number, tzolkin_start_name = to_tzolkin(first_j)
    gen_longcount = longcount_generator(*from_jd(first_j))
    gen_tzolkin = tzolkin_generator(tzolkin_start_number, tzolkin_start_name)
    # 13 day long tzolkin 'weeks'
    lpad = tzolkin_start_number - 1
    # NOTE(review): due to operator precedence this evaluates as
    # 13 - (tzolkin_start_number + (19 % 13)) == 13 - tzolkin_start_number - 6;
    # possibly '(tzolkin_start_number + 19) % 13' was intended - confirm.
    rpad = 13 - (tzolkin_start_number + 19 % 13)
    monlen = month_length(haab_month)
    # Month days padded with None so each row aligns to a 13-day tzolkin week
    days = [None] * lpad + list(range(1, monlen + 1)) + rpad * [None]
    def g(x, generate):
        # Advance the paired generator only for real (non-padding) days
        if x is None:
            return None
        return next(generate)
    return [[(k, g(k, gen_tzolkin), g(k, gen_longcount)) for k in days[i:i + 13]] for i in range(0, len(days), 13)]
|
Update the data in this object.
def _update_data(self, data=None):
    '''Update the data in this object.

    Maps the given dict onto this object's attributes, routing custom
    fields into a Custom_Fields object and parsing date/datetime values.
    Any pending attribute changes are preserved across the update.
    '''
    # Avoid the shared mutable default - the dict is mutated via pop() below
    if data is None:
        data = {}
    # Store the changes to prevent this update from affecting it
    pending_changes = self._changes or {}
    try:
        del self._changes
    except AttributeError:
        # _changes may live on the class rather than the instance
        pass
    # Map custom fields into our custom fields object
    try:
        custom_field_data = data.pop('custom_fields')
    except KeyError:
        pass
    else:
        self.custom_fields = Custom_Fields(custom_field_data)
    # Map all other dictionary data to object attributes
    # (dict.items works on both Python 2 and 3, unlike iteritems)
    for key, value in data.items():
        lookup_key = self._field_type.get(key, key)
        if lookup_key in ('datetime', 'date'):
            # if it's a datetime/date string, turn it into a proper DT object
            self.__dict__[key] = datetime_parse(value)
        else:
            # Check to see if there's cache data for this item.
            # Will return an object if it's recognized as one.
            self.__dict__[key] = self._redmine.check_cache(lookup_key, value)
    # Set the changes dict to track all changes from here on out
    self._changes = pending_changes
|
Remaps a given changed field from tag to tag_id.
def _remap_tag_to_tag_id(cls, tag, new_data):
    '''Remaps a given changed field from tag to tag_id.

    e.g. a changed 'status' entry becomes 'status_id', taking the id from
    a dict value, an object's .id attribute, or the raw value itself.
    Mutates new_data in place; does nothing if tag isn't present.
    '''
    try:
        value = new_data[tag]
    except KeyError:
        # If tag wasn't changed, just return
        return
    tag_id = tag + '_id'
    try:
        # Remap the ID change to the required field
        new_data[tag_id] = value['id']
    except (TypeError, KeyError):
        # Not a mapping (or no 'id' key) - try it as an object instead
        try:
            new_data[tag_id] = value.id
        except AttributeError:
            # Not a dict or object - just use whatever value was given
            new_data[tag_id] = value
    # Remove the tag from the changed data
    del new_data[tag]
|
Add an item manager to this object.
def _add_item_manager(self, key, item_class, **paths):
    '''
    Add an item manager to this object.

    Fills any {attribute} placeholders in the given paths from this
    item's attributes, then stores a Redmine_Items_Manager under key.
    '''
    # dict.items works on both Python 2 and 3, unlike iteritems
    updated_paths = dict(
        (path_type, path_value.format(**self.__dict__))
        for path_type, path_value in paths.items()
    )
    manager = Redmine_Items_Manager(self._redmine, item_class,
                                    **updated_paths)
    self.__dict__[key] = manager
|
Save all changes on this item (if any) back to Redmine.
def save(self):
    '''Save all changes on this item (if any) back to Redmine.

    Remaps changed object references to their *_id fields, serializes
    changed date/datetime values, pushes the changes to the server, and
    clears the change tracker on success. Returns None.
    '''
    self._check_custom_fields()
    if not self._changes:
        return None
    # Remap changed object references (e.g. 'status') to their id fields
    for tag in self._remap_to_id:
        self._remap_tag_to_tag_id(tag, self._changes)
    # Check for custom handlers for tags
    for tag, field_type in self._field_type.items():
        try:
            raw_data = self._changes[tag]
        except KeyError:
            # This field wasn't changed
            continue
        # Convert datetime type to a datetime string that Redmine expects
        if field_type == 'datetime':
            try:
                self._changes[tag] = raw_data.strftime('%Y-%m-%dT%H:%M:%S%z')
            except AttributeError:
                continue
        # Convert date type to a date string that Redmine expects
        elif field_type == 'date':
            try:
                self._changes[tag] = raw_data.strftime('%Y-%m-%d')
            except AttributeError:
                continue
    self._update(self._changes)
    # Successful save, woot! Now clear the changes dict
    self._changes.clear()
|
Refresh this item from data on the server.
Will save any unsaved data first.
def refresh(self):
    '''Refresh this item from data on the server.
    Will save any unsaved data first.'''
    if not self._item_path:
        raise AttributeError('refresh is not available for %s' % self._type)
    if not self.id:
        raise RedmineError('%s did not come from the Redmine server - no link.' % self._type)
    # Best effort: refresh should proceed even if saving pending changes
    # fails, but don't swallow KeyboardInterrupt/SystemExit like a bare except
    try:
        self.save()
    except Exception:
        pass
    # Mimic the Redmine_Item_Manager.get command
    target = self._item_path % self.id
    json_data = self._redmine.get(target)
    data = self._redmine.unwrap_json(self._type, json_data)
    self._update_data(data=data)
|
Get all changed values.
def _get_changes(self):
    '''Get all changed values.

    Returns a dict mapping each changed field id to its value (defaulting
    to '' when absent), then resets the change tracking.
    '''
    result = dict(
        (f['id'], f.get('value', ''))
        for f in self._data
        if f.get('changed', False)
    )
    # BUG FIX: the original referenced self._clear_changes without calling
    # it (a no-op attribute access), so the change flags were never reset.
    self._clear_changes()
    return result
|
Return a query iterator with (id, object) pairs.
def iteritems(self, **options):
    '''Return a query iterator with (id, object) pairs.'''
    # Iterate with a plain for-loop instead of calling .next() manually:
    # works on both Python 2 and 3, and avoids raising StopIteration
    # inside a generator (an error under PEP 479 / Python 3.7+).
    for obj in self.query(**options):
        yield (obj.id, obj)
|
Return an object derived from the given json data.
def _objectify(self, json_data=None, data=None):
    '''Return an object derived from the given json data.

    Either json_data (a JSON string) or data (an already-parsed dict) may
    be supplied; json_data takes precedence when both are given.
    '''
    # Avoid the shared mutable default argument
    if data is None:
        data = {}
    if json_data:
        # Parse the data
        try:
            data = json.loads(json_data)
        except ValueError:
            # If parsing failed, then raise the string which likely contains an error message instead of data
            raise RedmineError(json_data)
    # Check to see if there is a data wrapper
    # Some replies will have {'issue':{<data>}} instead of just {<data>}
    try:
        data = data[self._item_type]
    except KeyError:
        pass
    # Either returns a new item or updates the item in the cache and returns that
    return self._redmine.check_cache(self._item_type, data, self._object)
|
Create a new item with the provided dict information. Returns the new item.
def new(self, **fields):
    '''Create a new item with the provided dict information. Returns the new item.

    Note: the kwargs collector was renamed from **dict to **fields to stop
    shadowing the builtin; callers pass keywords, so nothing changes for them.
    '''
    if not self._item_new_path:
        raise AttributeError('new is not available for %s' % self._item_name)
    # Remap various tag to tag_id
    for tag in self._object._remap_to_id:
        self._object._remap_tag_to_tag_id(tag, fields)
    target = self._item_new_path
    payload = json.dumps({self._item_type: fields})
    json_data = self._redmine.post(target, payload)
    data = self._redmine.unwrap_json(self._item_type, json_data)
    # Remember where this item came from so it can refresh itself later
    data['_source_path'] = target
    return self._objectify(data=data)
|
Get a single item with the given ID
def get(self, id, **options):
    '''Get a single item with the given ID'''
    # Managers without an item path don't support direct retrieval
    if not self._item_path:
        raise AttributeError('get is not available for %s' % self._item_name)
    target = self._item_path % id
    raw_reply = self._redmine.get(target, **options)
    item_data = self._redmine.unwrap_json(self._item_type, raw_reply)
    # Remember where this item came from so it can refresh itself later
    item_data['_source_path'] = target
    return self._objectify(data=item_data)
|
Update a given item with the passed data.
def update(self, id, **fields):
    '''Update a given item with the passed data.

    Note: the kwargs collector was renamed from **dict to **fields to stop
    shadowing the builtin; callers pass keywords, so nothing changes for them.
    '''
    if not self._item_path:
        raise AttributeError('update is not available for %s' % self._item_name)
    target = (self._update_path or self._item_path) % id
    payload = json.dumps({self._item_type: fields})
    self._redmine.put(target, payload)
    return None
|
Delete a single item with the given ID
def delete(self, id):
    '''Delete a single item with the given ID'''
    # Managers without an item path don't support item deletion
    if not self._item_path:
        raise AttributeError('delete is not available for %s' % self._item_name)
    self._redmine.delete(self._item_path % id)
    return None
|
Return an iterator for the given items.
def query(self, **options):
    '''Return an iterator for the given items.

    Pages through the server results, yielding objectified items until
    the reported total_count is exhausted (or no count is available).
    '''
    if not self._query_path:
        raise AttributeError('query is not available for %s' % self._item_name)
    offset = 0
    limit = options.get('limit', 25)
    options['limit'] = limit
    target = self._query_path
    while True:
        options['offset'] = offset
        # go get the data with the given offset
        json_data = self._redmine.get(target, options)
        try:
            data = json.loads(json_data)
        except ValueError:
            # Not JSON - the reply likely contains an error message
            raise RedmineError(json_data)
        # The data is enclosed in the _query_container item
        # That is, {'issues':{(issue1),(issue2)...}, 'total_count':##}
        data_container = data[self._query_container]
        for item_data in data_container:
            yield self._objectify(data=item_data)
        # If the container was empty, we requested past the end, just exit
        if not data_container:
            break
        try:
            if int(data['total_count']) > (offset + len(data_container)):
                # moar data!
                offset += limit
            else:
                break
        except (KeyError, TypeError, ValueError):
            # If we don't even have a usable 'total_count', we're done.
            break
|
Create the authentication object with the given credentials.
def _setup_authentication(self, username, password):
    '''Create the authentication object with the given credentials.

    Installs a urllib2 opener with HTTP basic auth for self._url.
    Side effect: opens self._url once to prime the opener, which performs
    a network round-trip. Does nothing if no username (or key fallback)
    is available.
    '''
    ## BUG WORKAROUND
    if self.version < 1.1:
        # Version 1.0 had a bug when using the key parameter.
        # Later versions have the opposite bug (a key in the username doesn't function)
        if not username:
            username = self._key
            self._key = None
    if not username:
        return
    if not password:
        password = '12345' #the same combination on my luggage! (required dummy value)
    #realm = 'Redmine API' - doesn't always work
    # create a password manager
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, self._url, username, password )
    handler = urllib2.HTTPBasicAuthHandler( password_mgr )
    # create "opener" (OpenerDirector instance)
    self._opener = urllib2.build_opener( handler )
    # set the opener when we fetch the URL
    self._opener.open( self._url )
    # Install the opener.
    urllib2.install_opener( self._opener )
|
Opens a page from the server with optional XML. Returns a response file-like object
def open_raw(self, page, parms=None, payload=None, HTTPrequest=None, payload_type='application/json' ):
    '''Opens a page from the server with optional XML. Returns a response file-like object

    page: path appended to self._url
    parms: dict of query-string parameters
    payload: optional request body (sent with Content-Type payload_type)
    HTTPrequest: optional Request subclass (used for PUT/DELETE verbs)
    '''
    if not parms:
        parms={}
    # if we're using a key, but it's not going in the header, add it to the parms array
    if self._key and not self.key_in_header:
        parms['key'] = self._key
    # encode any data
    urldata = ''
    if parms:
        urldata = '?' + urllib.urlencode( parms )
    fullUrl = self._url + page
    #debug
    if self.debug:
        print fullUrl + urldata
    # register this url to be used with the opener
    # must be registered for each unique path
    # NOTE(review): this performs an extra network request per call just to
    # register the path with the auth opener - confirm this is intended
    try:
        self._opener.open( fullUrl )
    except AttributeError:
        # No authentication
        pass
    # Set up the request
    if HTTPrequest:
        request = HTTPrequest( fullUrl + urldata )
    else:
        request = urllib2.Request( fullUrl + urldata )
    # If the key is set and in the header, add it
    if self._key and self.key_in_header:
        request.add_header('X-Redmine-API-Key', self._key)
    # If impersonation is set, add header
    if self.impersonate and self.impersonation_supported:
        request.add_header('X-Redmine-Switch-User', self.impersonate)
    # get the data and return XML object
    if payload:
        request.add_header('Content-Type', payload_type)
        response = urllib2.urlopen( request, payload )
    else:
        response = urllib2.urlopen( request )
    return response
|
Opens a page from the server with optional content. Returns the string response.
def open(self, page, parms=None, payload=None, HTTPrequest=None ):
    '''Opens a page from the server with optional content. Returns the string response.'''
    # Fetch the raw response object and drain its body into a string
    return self.open_raw( page, parms, payload, HTTPrequest ).read()
|
Posts a string payload to the server - used to make new Redmine items. Returns an JSON string or error.
def post(self, page, payload, parms=None ):
    '''Posts a string payload to the server - used to make new Redmine items. Returns an JSON string or error.'''
    if self.readonlytest:
        # Read-only mode: don't touch the server, just echo the payload back
        print 'Redmine read only test: Pretending to create: ' + page
        return payload
    else:
        return self.open( page, parms, payload )
|
Puts an XML object on the server - used to update Redmine items. Returns nothing useful.
def put(self, page, payload, parms=None ):
    '''Puts an XML object on the server - used to update Redmine items. Returns nothing useful.'''
    if self.readonlytest:
        # Read-only mode: log instead of updating (implicitly returns None)
        print 'Redmine read only test: Pretending to update: ' + page
    else:
        # Uses a custom Request subclass to issue an HTTP PUT
        return self.open( page, parms, payload, HTTPrequest=self.PUT_Request )
|
Deletes a given object on the server - used to remove items from Redmine. Use carefully!
def delete(self, page ):
    '''Deletes a given object on the server - used to remove items from Redmine. Use carefully!'''
    if self.readonlytest:
        # Read-only mode: log instead of deleting (implicitly returns None)
        print 'Redmine read only test: Pretending to delete: ' + page
    else:
        # Uses a custom Request subclass to issue an HTTP DELETE
        return self.open( page, HTTPrequest=self.DELETE_Request )
|
Decodes a json string, and unwraps any 'type' it finds within.
def unwrap_json(self, type, json_data):
    '''Decodes a json string, and unwraps any 'type' it finds within.'''
    # Parse the reply; a non-JSON body is most likely a server error message
    try:
        parsed = json.loads(json_data)
    except ValueError:
        raise RedmineError(json_data)
    # Some replies are wrapped, e.g. {'issue':{<data>}} instead of {<data>}
    try:
        return parsed[type]
    except KeyError:
        return parsed
|
Finds and stores a reference to all Redmine_Item subclasses for later use.
def find_all_item_classes(self):
    '''Finds and stores a reference to all Redmine_Item subclasses for later use.'''
    # This is a circular import, but performed after the class is defined and an object is instantiated.
    # We do this in order to get references to any object definitions in the redmine.py file
    # without requiring anyone editing the file to do anything other than create a class with the proper name.
    import redmine as public_classes
    item_class = {}
    for key, value in public_classes.__dict__.items():
        # issubclass raises TypeError for non-class module members; skip them
        try:
            if issubclass(value, Redmine_Item):
                item_class[key.lower()] = value
        except TypeError:
            continue
    self.item_class = item_class
|
Returns the updated cached version of the given dict
def check_cache(self, type, data, obj=None):
    '''Returns the updated cached version of the given dict.

    If data identifies a known item, the cached object is updated in place
    and returned; otherwise a new item object is created, cached, and
    returned. Non-identifiable data is returned unchanged.
    '''
    try:
        id = data['id']
    except (KeyError, TypeError):
        # Not an identifiable item (no 'id' key, or not a mapping at all)
        return data
    # If obj was passed in, its type takes precedence
    # (AttributeError: obj is None or has no _get_type;
    #  TypeError: e.g. Python 2 unbound method called on the class)
    try:
        type = obj._get_type()
    except (AttributeError, TypeError):
        pass
    # Find the item in the cache, update and return if it's there
    try:
        hit = self.item_cache[type][id]
    except KeyError:
        pass
    else:
        hit._update_data(data)
        return hit
    # Not there? Let's make us a new item
    # If we weren't given the object ref, find the name in the global scope
    if not obj:
        # Default to Redmine_Item if it's not found
        obj = self.item_class.get(type, Redmine_Item)
    new_item = obj(redmine=self, data=data, type=type)
    # Store it
    self.item_cache.setdefault(type, {})[id] = new_item
    return new_item
|
Return a Point instance as the displacement of two points.
def substract(self, pt):
    """Return a Point instance as the displacement of two points."""
    # Guard clause: only Point operands are supported
    if not isinstance(pt, Point):
        raise TypeError
    return Point(pt.x - self.x, pt.y - self.y, pt.z - self.z)
|
Return a Point instance from a given list
def from_list(cls, l):
    """Return a Point instance from a given list"""
    # Only 2-element (x, y) and 3-element (x, y, z) inputs are accepted
    if len(l) not in (2, 3):
        raise AttributeError
    return cls(*map(float, l))
|
Return a Vector as the product of the vector and a real number.
def multiply(self, number):
    """Return a Vector as the product of the vector and a real number."""
    scaled = [component * number for component in self.to_list()]
    return self.from_list(scaled)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.