Convert coordinates from TWD97 to WGS84
The east and north coordinates should be given in meters, as floats
pkm: set to True for the Penghu, Kinmen and Matsu area
You can specify one of the following presentations of the returned values:
dms - A tuple with degrees (int), minutes (int) and seconds (float)
dmsstr - [+/-]DDD°MMM'DDD.DDDDD" (unicode)
mindec - A tuple with degrees (int) and minutes (float)
mindecstr - [+/-]DDD°MMM.MMMMM' (unicode)
(default)degdec - DDD.DDDDD (float)
def towgs84(E, N, pkm=False, presentation=None):
"""
Convert coordinates from TWD97 to WGS84
The east and north coordinates should be given in meters, as floats
pkm: set to True for the Penghu, Kinmen and Matsu area
You can specify one of the following presentations of the returned values:
dms - A tuple with degrees (int), minutes (int) and seconds (float)
dmsstr - [+/-]DDD°MMM'DDD.DDDDD" (unicode)
mindec - A tuple with degrees (int) and minutes (float)
mindecstr - [+/-]DDD°MMM.MMMMM' (unicode)
(default)degdec - DDD.DDDDD (float)
"""
_lng0 = lng0pkm if pkm else lng0
E /= 1000.0
N /= 1000.0
epsilon = (N-N0) / (k0*A)
eta = (E-E0) / (k0*A)
epsilonp = epsilon - beta1*sin(2*1*epsilon)*cosh(2*1*eta) - \
beta2*sin(2*2*epsilon)*cosh(2*2*eta) - \
beta3*sin(2*3*epsilon)*cosh(2*3*eta)
etap = eta - beta1*cos(2*1*epsilon)*sinh(2*1*eta) - \
beta2*cos(2*2*epsilon)*sinh(2*2*eta) - \
beta3*cos(2*3*epsilon)*sinh(2*3*eta)
sigmap = 1 - 2*1*beta1*cos(2*1*epsilon)*cosh(2*1*eta) - \
2*2*beta2*cos(2*2*epsilon)*cosh(2*2*eta) - \
2*3*beta3*cos(2*3*epsilon)*cosh(2*3*eta)
taup = 2*1*beta1*sin(2*1*epsilon)*sinh(2*1*eta) + \
2*2*beta2*sin(2*2*epsilon)*sinh(2*2*eta) + \
2*3*beta3*sin(2*3*epsilon)*sinh(2*3*eta)
chi = asin(sin(epsilonp) / cosh(etap))
latitude = chi + delta1*sin(2*1*chi) + \
delta2*sin(2*2*chi) + \
delta3*sin(2*3*chi)
longitude = _lng0 + atan(sinh(etap) / cos(epsilonp))
func = None
presentation = 'to%s' % presentation if presentation else None
if presentation in presentations:
func = getattr(sys.modules[__name__], presentation)
if func and presentation != 'todegdec':
return func(degrees(latitude)), func(degrees(longitude))
return (degrees(latitude), degrees(longitude))
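A minimal usage sketch (the easting/northing values below are placeholders, not a verified survey point):
# TWD97 easting/northing in meters -> WGS84 decimal degrees
lat, lng = towgs84(248170.0, 2652129.0)
# The same point as a degrees/minutes/seconds tuple
towgs84(248170.0, 2652129.0, presentation='dms')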
|
Convert coordinates from WGS84 to TWD97
pkm: set to True for the Penghu, Kinmen and Matsu area
The latitude and longitude can be in the following formats:
[+/-]DDD°MMM'SSS.SSSS" (unicode)
[+/-]DDD°MMM.MMMM' (unicode)
[+/-]DDD.DDDDD (string, unicode or float)
The returned coordinates are in meters
def fromwgs84(lat, lng, pkm=False):
"""
Convert coordinates from WGS84 to TWD97
pkm: set to True for the Penghu, Kinmen and Matsu area
The latitude and longitude can be in the following formats:
[+/-]DDD°MMM'SSS.SSSS" (unicode)
[+/-]DDD°MMM.MMMM' (unicode)
[+/-]DDD.DDDDD (string, unicode or float)
The returned coordinates are in meters
"""
_lng0 = lng0pkm if pkm else lng0
lat = radians(todegdec(lat))
lng = radians(todegdec(lng))
t = sinh((atanh(sin(lat)) - 2*pow(n,0.5)/(1+n)*atanh(2*pow(n,0.5)/(1+n)*sin(lat))))
epsilonp = atan(t/cos(lng-_lng0))
etap = atan(sin(lng-_lng0) / pow(1+t*t, 0.5))
E = E0 + k0*A*(etap + alpha1*cos(2*1*epsilonp)*sinh(2*1*etap) +
alpha2*cos(2*2*epsilonp)*sinh(2*2*etap) +
alpha3*cos(2*3*epsilonp)*sinh(2*3*etap))
N = N0 + k0*A*(epsilonp + alpha1*sin(2*1*epsilonp)*cosh(2*1*etap) +
alpha2*sin(2*2*epsilonp)*cosh(2*2*etap) +
alpha3*sin(2*3*epsilonp)*cosh(2*3*etap))
return E*1000, N*1000
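As a quick sanity check, fromwgs84 and towgs84 are (approximate) inverses; a sketch with a placeholder WGS84 point:
# decimal degrees in, meters out
E, N = fromwgs84(24.0, 121.0)
# converting back should recover roughly (24.0, 121.0)
towgs84(E, N)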
|
Makes sure that value is within a specific range.
If not, then the lower or upper bound is returned
def clipValue(self, value, minValue, maxValue):
'''
Makes sure that value is within a specific range.
If not, then the lower or upper bound is returned
'''
return min(max(value, minValue), maxValue)
|
returns the ground resolution based on latitude and zoom level.
def getGroundResolution(self, latitude, level):
'''
returns the ground resolution based on latitude and zoom level.
'''
latitude = self.clipValue(latitude, self.min_lat, self.max_lat)
mapSize = self.getMapDimensionsByZoomLevel(level)
return math.cos(
latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
mapSize
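The same Web Mercator math written as a standalone sketch, assuming 256-pixel tiles (so the map is 256 * 2**level pixels on a side) and an earth radius of 6378137 m; the class itself obtains these values from its own attributes and getMapDimensionsByZoomLevel:
import math
def ground_resolution(lat, level, tile_size=256, earth_radius=6378137):
    # meters per pixel at the given latitude and zoom level
    map_size = tile_size * 2 ** level
    return math.cos(math.radians(lat)) * 2 * math.pi * earth_radius / map_size
ground_resolution(0.0, 1)  # ~78271.5 m/px at the equator, zoom 1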
|
returns the map scale based on the dpi of the screen
def getMapScale(self, latitude, level, dpi=96):
'''
returns the map scale based on the dpi of the screen
'''
dpm = dpi / 0.0254 # convert to dots per meter
return self.getGroundResolution(latitude, level) * dpm
|
returns the x and y values of the pixel corresponding to a latitude
and longitude.
def convertLatLngToPixelXY(self, lat, lng, level):
'''
returns the x and y values of the pixel corresponding to a latitude
and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
lat = self.clipValue(lat, self.min_lat, self.max_lat)
lng = self.clipValue(lng, self.min_lng, self.max_lng)
x = (lng + 180) / 360
sinlat = math.sin(lat * math.pi / 180)
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1))
pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1))
return (pixelX, pixelY)
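A standalone sketch of the same projection, again assuming 256-pixel tiles and omitting the latitude/longitude clipping that the method performs:
import math
def latlng_to_pixel(lat, lng, level, tile_size=256):
    map_size = tile_size * 2 ** level
    x = (lng + 180) / 360.0
    sinlat = math.sin(math.radians(lat))
    y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
    return int(x * map_size + 0.5), int(y * map_size + 0.5)
latlng_to_pixel(0.0, 0.0, 1)  # -> (256, 256), the center of a 512x512-pixel map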
|
converts a pixel x, y to a latitude and longitude.
def convertPixelXYToLngLat(self, pixelX, pixelY, level):
'''
converts a pixel x, y to a latitude and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
x = (self.clipValue(pixelX, 0, mapSize - 1) / mapSize) - 0.5
y = 0.5 - (self.clipValue(pixelY, 0, mapSize - 1) / mapSize)
lat = 90 - 360 * math.atan(math.exp(-y * 2 * math.pi)) / math.pi
lng = 360 * x
return (lng, lat)
|
Computes quadKey value based on tile x, y and z values.
def tileXYZToQuadKey(self, x, y, z):
'''
Computes quadKey value based on tile x, y and z values.
'''
quadKey = ''
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
|
Computes tile x, y and z values based on quadKey.
def quadKeyToTileXYZ(self, quadKey):
'''
Computes tile x, y and z values based on quadKey.
'''
tileX = 0
tileY = 0
tileZ = len(quadKey)
for i in range(tileZ, 0, -1):
mask = 1 << (i - 1)
value = quadKey[tileZ - i]
if value == '0':
continue
elif value == '1':
tileX |= mask
elif value == '2':
tileY |= mask
elif value == '3':
tileX |= mask
tileY |= mask
else:
raise Exception('Invalid QuadKey')
return (tileX, tileY, tileZ)
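Round-trip sketch for the quadkey helpers; tileUtils is just a placeholder name for whatever instance of this class you have, and the expected values follow the Bing Maps tile-system convention:
tileUtils.tileXYZToQuadKey(3, 5, 3)  # -> '213'
tileUtils.quadKeyToTileXYZ('213')    # -> (3, 5, 3)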
|
Returns the upper-left hand corner lat/lng for a tile
def getTileOrigin(self, tileX, tileY, level):
'''
Returns the upper-left hand corner lat/lng for a tile
'''
pixelX, pixelY = self.convertTileXYToPixelXY(tileX, tileY)
lng, lat = self.convertPixelXYToLngLat(pixelX, pixelY, level)
return (lat, lng)
|
Returns a list of tile urls by extent
def getTileUrlsByLatLngExtent(self, xmin, ymin, xmax, ymax, level):
'''
Returns a list of tile urls by extent
'''
# Upper-Left Tile
tileXMin, tileYMin = self.tileUtils.convertLngLatToTileXY(xmin, ymax,
level)
# Lower-Right Tile
tileXMax, tileYMax = self.tileUtils.convertLngLatToTileXY(xmax, ymin,
level)
tileUrls = []
for y in range(tileYMax, tileYMin - 1, -1):
for x in range(tileXMin, tileXMax + 1, 1):
tileUrls.append(self.createTileUrl(x, y, level))
return tileUrls
|
returns new tile url based on template
def createTileUrl(self, x, y, z):
'''
returns new tile url based on template
'''
return self.tileTemplate.replace('{{x}}', str(x)).replace('{{y}}', str(
y)).replace('{{z}}', str(z))
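Assuming the instance was configured with a hypothetical template such as 'https://tile.example.com/{{z}}/{{x}}/{{y}}.png', the substitution works like this:
tileUtils.createTileUrl(3, 5, 7)
# -> 'https://tile.example.com/7/3/5.png'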
|
Handles navigational reference frame updates.
These are necessary to assign geo coordinates to alerts and other
misc things.
:param event: Event with incoming referenceframe message
def referenceframe(self, event):
"""Handles navigational reference frame updates.
These are necessary to assign geo coordinates to alerts and other
misc things.
:param event: Event with incoming referenceframe message
"""
self.log("Got a reference frame update! ", event, lvl=events)
self.reference_frame = event.data
|
Checks if an alert is ongoing and alerts the newly connected
client, if so.
def userlogin(self, event):
"""Checks if an alert is ongoing and alerts the newly connected
client, if so."""
client_uuid = event.clientuuid
self.log(event.user, pretty=True, lvl=verbose)
self.log('Adding client')
self.clients[event.clientuuid] = event.user
for topic, alert in self.alerts.items():
self.alert(client_uuid, alert)
|
AlertManager event handler for incoming events
:param event: Event with incoming AlertManager message
def trigger(self, event):
"""AlertManager event handler for incoming events
:param event: Event with incoming AlertManager message
"""
topic = event.data.get('topic', None)
if topic is None:
self.log('No alert topic to trigger', lvl=warn)
return
alert = {
'topic': topic,
'message': event.data.get('msg', 'Alert has been triggered'),
'role': event.data.get('role', 'all')
}
self._trigger(event, alert)
|
AlertManager event handler for incoming events
:param event: Event with incoming AlertManager message
def cancel(self, event):
"""AlertManager event handler for incoming events
:param event: Event with incoming AlertManager message
"""
topic = event.data.get('topic', None)
if topic is None:
self.log('No alert topic to cancel', lvl=warn)
return
self._cancel(topic)
|
Isomer Management Tool
This tool supports various operations to manage isomer instances.
Most of the commands are grouped. To obtain more information about the
groups' available sub commands/groups, try
iso [group]
To display details of a command or its sub groups, try
iso [group] [subgroup] [..] [command] --help
To get a map of all available commands, try
iso cmdmap
def cli(ctx, instance, quiet, verbose, log_level, dbhost, dbname):
"""Isomer Management Tool
This tool supports various operations to manage isomer instances.
Most of the commands are grouped. To obtain more information about the
groups' available sub commands/groups, try
iso [group]
To display details of a command or its sub groups, try
iso [group] [subgroup] [..] [command] --help
To get a map of all available commands, try
iso cmdmap
"""
ctx.obj['instance'] = instance
if dbname == db_default and instance != 'default':
dbname = instance
ctx.obj['quiet'] = quiet
ctx.obj['verbose'] = verbose
verbosity['console'] = log_level
verbosity['global'] = log_level
ctx.obj['dbhost'] = dbhost
ctx.obj['dbname'] = dbname
|
Primary entry point for all AstroCats catalogs.
From this entry point, all internal catalogs can be accessed and their
public methods executed (for example: import scripts).
def main():
"""Primary entry point for all AstroCats catalogs.
From this entry point, all internal catalogs can be accessed and their
public methods executed (for example: import scripts).
"""
from datetime import datetime
# Initialize Command-Line and User-Config Settings, Log
# -----------------------------------------------------
beg_time = datetime.now()
# Process command-line arguments to determine action
# If no subcommand (e.g. 'import') is given, returns 'None' --> exit
args, sub_clargs = load_command_line_args()
if args is None:
return
# Create a logging object
log = load_log(args)
# Run configuration/setup interactive script
if args.command == 'setup':
setup_user_config(log)
return
# Make sure configuration file exists, or that's what we're doing
# (with the 'setup' subcommand)
if not os.path.isfile(_CONFIG_PATH):
raise RuntimeError("'{}' does not exist. "
"Run `astrocats setup` to configure."
"".format(_CONFIG_PATH))
git_vers = get_git()
title_str = "Astrocats, version: {}, SHA: {}".format(__version__, git_vers)
log.warning("\n\n{}\n{}\n{}\n".format(title_str, '=' * len(title_str),
beg_time.ctime()))
# Load the user settings from the home directory
args = load_user_config(args, log)
# Choose Catalog and Operation(s) to perform
# ------------------------------------------
mod_name = args.command
log.debug("Importing specified module: '{}'".format(mod_name))
# Try to import the specified module
try:
mod = importlib.import_module('.' + mod_name, package='astrocats')
except Exception as err:
log.error("Import of specified module '{}' failed.".format(mod_name))
log_raise(log, str(err), type(err))
# Run the `main.main` method of the specified module
log.debug("Running `main.main()`")
mod.main.main(args, sub_clargs, log)
end_time = datetime.now()
log.warning("\nAll complete at {}, After {}".format(end_time, end_time -
beg_time))
return
|
Setup a configuration file in the user's home directory.
Currently this method stores default values to a fixed configuration
filename. It should be modified to run an interactive prompt session
asking for parameters (or at least confirming the default ones).
Arguments
---------
log : `logging.Logger` object
def setup_user_config(log):
"""Setup a configuration file in the user's home directory.
Currently this method stores default values to a fixed configuration
filename. It should be modified to run an interactive prompt session
asking for parameters (or at least confirming the default ones).
Arguments
---------
log : `logging.Logger` object
"""
log.warning("AstroCats Setup")
log.warning("Configure filepath: '{}'".format(_CONFIG_PATH))
# Create path to configuration file as needed
config_path_dir = os.path.split(_CONFIG_PATH)[0]
if not os.path.exists(config_path_dir):
log.debug("Creating config directory '{}'".format(config_path_dir))
os.makedirs(config_path_dir)
if not os.path.isdir(config_path_dir):
log_raise(log, "Configure path error '{}'".format(config_path_dir))
# Determine default settings
# Get this containing directory and use that as default data path
def_base_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
log.warning("Setting '{}' to default path: '{}'".format(_BASE_PATH_KEY,
def_base_path))
config = {_BASE_PATH_KEY: def_base_path}
# Write settings to configuration file
json.dump(config, open(_CONFIG_PATH, 'w'))
if not os.path.exists(def_base_path):
log_raise(log, "Problem creating configuration file.")
return
|
Load settings from the user's configuration file, and add them to `args`.
Settings are loaded from the configuration file in the user's home
directory. Those parameters are added (as attributes) to the `args`
object.
Arguments
---------
args : `argparse.Namespace`
Namespace object to which configuration attributes will be added.
Returns
-------
args : `argparse.Namespace`
Namespace object with added attributes.
def load_user_config(args, log):
"""Load settings from the user's confiuration file, and add them to `args`.
Settings are loaded from the configuration file in the user's home
directory. Those parameters are added (as attributes) to the `args`
object.
Arguments
---------
args : `argparse.Namespace`
Namespace object to which configuration attributes will be added.
Returns
-------
args : `argparse.Namespace`
Namespace object with added attributes.
"""
if not os.path.exists(_CONFIG_PATH):
err_str = (
"Configuration file does not exists ({}).\n".format(_CONFIG_PATH) +
"Run `python -m astrocats setup` to configure.")
log_raise(log, err_str)
config = json.load(open(_CONFIG_PATH, 'r'))
setattr(args, _BASE_PATH_KEY, config[_BASE_PATH_KEY])
log.debug("Loaded configuration: {}: {}".format(_BASE_PATH_KEY, config[
_BASE_PATH_KEY]))
return args
|
Load and parse command-line arguments.
Arguments
---------
clargs : str or None
'Faked' command-line arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments.
def load_command_line_args(clargs=None):
"""Load and parse command-line arguments.
Arguments
---------
clargs : str or None
'Faked' command-line arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments.
"""
import argparse
git_vers = get_git()
parser = argparse.ArgumentParser(
prog='astrocats',
description='Generate catalogs for astronomical data.')
parser.add_argument('command', nargs='?', default=None)
parser.add_argument(
'--version',
action='version',
version='AstroCats v{}, SHA: {}'.format(__version__, git_vers))
parser.add_argument(
'--verbose',
'-v',
dest='verbose',
default=False,
action='store_true',
help='Print more messages to the screen.')
parser.add_argument(
'--debug',
'-d',
dest='debug',
default=False,
action='store_true',
help='Print excessive messages to the screen.')
parser.add_argument(
'--include-private',
dest='private',
default=False,
action='store_true',
help='Include private data in import.')
parser.add_argument(
'--travis',
'-t',
dest='travis',
default=False,
action='store_true',
help='Run import script in test mode for Travis.')
parser.add_argument(
'--clone-depth',
dest='clone_depth',
default=0,
type=int,
help=('When cloning git repos, only clone out to this depth '
'(default: 0 = all levels).'))
parser.add_argument(
'--purge-outputs',
dest='purge_outputs',
default=False,
action='store_true',
help=('Purge git outputs after cloning.'))
parser.add_argument(
'--log',
dest='log_filename',
default=None,
help='Filename to which to store logging information.')
# If output files should be written or not
# ----------------------------------------
write_group = parser.add_mutually_exclusive_group()
write_group.add_argument(
'--write',
action='store_true',
dest='write_entries',
default=True,
help='Write entries to files [default].')
write_group.add_argument(
'--no-write',
action='store_false',
dest='write_entries',
default=True,
help='do not write entries to file.')
# If previously cleared output files should be deleted or not
# -----------------------------------------------------------
delete_group = parser.add_mutually_exclusive_group()
delete_group.add_argument(
'--predelete',
action='store_true',
dest='delete_old',
default=True,
help='Delete all old event files to begin [default].')
delete_group.add_argument(
'--no-predelete',
action='store_false',
dest='delete_old',
default=True,
help='Do not delete all old event files to start.')
args, sub_clargs = parser.parse_known_args(args=clargs)
# Print the help information if no command is given
if args.command is None:
parser.print_help()
return None, None
return args, sub_clargs
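A sketch of calling it with 'faked' arguments instead of the real command line:
args, sub_clargs = load_command_line_args(['import', '--verbose', '--log', 'run.log'])
# args.command == 'import', args.verbose is True, args.log_filename == 'run.log';
# options not known to this parser are passed through in sub_clargs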
|
Load a `logging.Logger` object.
Arguments
---------
args : `argparse.Namespace` object
Namespace containing required settings:
{`args.debug`, `args.verbose`, and `args.log_filename`}.
Returns
-------
log : `logging.Logger` object
def load_log(args):
"""Load a `logging.Logger` object.
Arguments
---------
args : `argparse.Namespace` object
Namespace containing required settings:
{`args.debug`, `args.verbose`, and `args.log_filename`}.
Returns
-------
log : `logging.Logger` object
"""
from astrocats.catalog.utils import logger
# Determine verbosity ('None' means use default)
log_stream_level = None
if args.debug:
log_stream_level = logger.DEBUG
elif args.verbose:
log_stream_level = logger.INFO
# Create log
log = logger.get_logger(
stream_level=log_stream_level, tofile=args.log_filename)
log._verbose = args.verbose
log._debug = args.debug
return log
|
Function compares dictionaries by key-value recursively.
Old and new input data are both dictionaries
def compare_dicts(old_full, new_full, old_data, new_data, depth=0):
"""Function compares dictionaries by key-value recursively.
Old and new input data are both dictionaries
"""
depth = depth + 1
indent = " "*depth
# Print with an indentation matching the nested-dictionary depth
def my_print(msg):
print("{}{}".format(indent, msg))
old_keys = list(old_data.keys())
# Compare data key by key, in *this* dictionary level
# Note: since we're comparing by keys explicitly, order doesn't matter
for key in old_keys:
# Remove elements as we go
old_vals = old_data.pop(key)
# Current key
my_print("{}".format(key))
# If `new_data` doesn't also have this key, return False
if key not in new_data:
my_print("Key '{}' not in new_data.".format(key))
my_print("Old:")
my_print(pprint(old_data))
my_print("New:")
my_print(pprint(new_data))
return False
# If it does have the key, extract the values (remove as we go)
new_vals = new_data.pop(key)
# If these values are a sub-dictionary, compare those
if isinstance(old_vals, dict) and isinstance(new_vals, dict):
# If the sub-dictionary are not the same, return False
if not compare_dicts(old_full, new_full, old_vals, new_vals, depth=depth):
return False
# If these values are a list of sub-dictionaries, compare each of those
elif (isinstance(old_vals, list) and isinstance(old_vals[0], dict) and
isinstance(new_vals, list) and isinstance(new_vals[0], dict)):
for old_elem, new_elem in zip_longest(old_vals, new_vals):
# If one or the other has extra elements, print message, but
# continue on
if old_elem is None or new_elem is None:
my_print("Missing element!")
my_print("\tOld: '{}'".format(old_elem))
my_print("\tNew: '{}'".format(new_elem))
else:
if not compare_dicts(old_full, new_full, old_elem, new_elem, depth=depth):
return False
# At the lowest-dictionary level, compare the values themselves
else:
# Turn everything into a list for convenience (most things should be
# already)
if (not isinstance(old_vals, list) and
not isinstance(new_vals, list)):
old_vals = [old_vals]
new_vals = [new_vals]
# Sort both lists
old_vals = sorted(old_vals)
new_vals = sorted(new_vals)
for oldv, newv in zip_longest(old_vals, new_vals):
# If one or the other has extra elements, print message, but
# continue on
if oldv is None or newv is None:
my_print("Missing element!")
my_print("\tOld: '{}'".format(oldv))
my_print("\tNew: '{}'".format(newv))
# If values match, continue
elif oldv == newv:
my_print("Good Match: '{}'".format(key))
# If values don't match, return False
else:
my_print("Bad Match: '{}'".format(key))
my_print("\tOld: '{}'".format(oldv))
my_print("\tNew: '{}'".format(newv))
return False
return True
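A small usage sketch; note that the function pops keys from the data dictionaries it is given, so pass copies if you still need the originals:
import copy
old = {'a': 1, 'b': {'c': [1, 2]}}
new = {'a': 1, 'b': {'c': [2, 1]}}
compare_dicts(old, new, copy.deepcopy(old), copy.deepcopy(new))
# -> True, since leaf lists are sorted before comparison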
|
Clips a line to a rectangular area.
This implements the Cohen-Sutherland line clipping algorithm. xmin,
ymax, xmax and ymin denote the clipping area, into which the line
defined by x1, y1 (start point) and x2, y2 (end point) will be
clipped.
If the line does not intersect with the rectangular clipping area,
four None values will be returned as tuple. Otherwise a tuple of the
clipped line points will be returned in the form (cx1, cy1, cx2, cy2).
def cohensutherland(xmin, ymax, xmax, ymin, x1, y1, x2, y2):
"""Clips a line to a rectangular area.
This implements the Cohen-Sutherland line clipping algorithm. xmin,
ymax, xmax and ymin denote the clipping area, into which the line
defined by x1, y1 (start point) and x2, y2 (end point) will be
clipped.
If the line does not intersect with the rectangular clipping area,
four None values will be returned as tuple. Otherwise a tuple of the
clipped line points will be returned in the form (cx1, cy1, cx2, cy2).
"""
INSIDE, LEFT, RIGHT, LOWER, UPPER = 0, 1, 2, 4, 8
def _getclip(xa, ya):
#if dbglvl>1: print('point: '),; print(xa,ya)
p = INSIDE # default is inside
# consider x
if xa < xmin:
p |= LEFT
elif xa > xmax:
p |= RIGHT
# consider y
if ya < ymin:
p |= LOWER # bitwise OR
elif ya > ymax:
p |= UPPER # bitwise OR
return p
# check for trivially outside lines
k1 = _getclip(x1, y1)
k2 = _getclip(x2, y2)
# %% examine non-trivially outside points
# bitwise OR |
while (k1 | k2) != 0: # if both points are inside box (0000) , ACCEPT trivial whole line in box
# if line trivially outside window, REJECT
if (k1 & k2) != 0: # bitwise AND &
#if dbglvl>1: print(' REJECT trivially outside box')
# return nan, nan, nan, nan
return None, None, None, None
# non-trivial case, at least one point outside window
# this is not a bitwise or, it's the word "or"
opt = k1 or k2 # take first non-zero point, short circuit logic
if opt & UPPER: # these are bitwise ANDS
x = x1 + (x2 - x1) * (ymax - y1) / (y2 - y1)
y = ymax
elif opt & LOWER:
x = x1 + (x2 - x1) * (ymin - y1) / (y2 - y1)
y = ymin
elif opt & RIGHT:
y = y1 + (y2 - y1) * (xmax - x1) / (x2 - x1)
x = xmax
elif opt & LEFT:
y = y1 + (y2 - y1) * (xmin - x1) / (x2 - x1)
x = xmin
else:
raise RuntimeError('Undefined clipping state')
if opt == k1:
x1, y1 = x, y
k1 = _getclip(x1, y1)
#if dbglvl>1: print('checking k1: ' + str(x) + ',' + str(y) + ' ' + str(k1))
elif opt == k2:
#if dbglvl>1: print('checking k2: ' + str(x) + ',' + str(y) + ' ' + str(k2))
x2, y2 = x, y
k2 = _getclip(x2, y2)
return x1, y1, x2, y2
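Two quick examples against a 10x10 clipping box (note the argument order: xmin, ymax, xmax, ymin, then the line endpoints):
# a horizontal line entering from the left and leaving on the right is clipped to the box edges
cohensutherland(0, 10, 10, 0, -5, 5, 15, 5)    # clipped to the segment (0, 5)-(10, 5)
# a line entirely above the box is rejected
cohensutherland(0, 10, 10, 0, -5, 20, 15, 20)  # -> (None, None, None, None)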
|
The legacy OpenCV Horn-Schunck function requires these old-fashioned cv matrices rather than numpy arrays
def setupuv(rc):
"""
The legacy OpenCV Horn-Schunck function requires these old-fashioned cv matrices rather than numpy arrays
"""
if cv is not None:
(r, c) = rc
u = cv.CreateMat(r, c, cv.CV_32FC1)
v = cv.CreateMat(r, c, cv.CV_32FC1)
return (u, v)
else:
return [None]*2
|
Initialize a CatDict object, checking for errors.
def _init_cat_dict(self, cat_dict_class, key_in_self, **kwargs):
"""Initialize a CatDict object, checking for errors.
"""
# Catch errors associated with crappy, but not unexpected data
try:
new_entry = cat_dict_class(self, key=key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return None
return new_entry
|
Add a CatDict to this Entry if initialization succeeds and it
doesn't already exist within the Entry.
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
**kwargs):
"""Add a CatDict to this Entry if initialization succeeds and it
doesn't already exist within the Entry.
"""
# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
# Compare this new entry with all previous entries to make sure it is new
if cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
# Return the entry in case we want to use any additional
# tags to augment the old entry
return new_entry
self.setdefault(key_in_self, []).append(new_entry)
return True
|
Wrapper for `tqdm` progress bar.
def pbar(iter, desc='', **kwargs):
"""Wrapper for `tqdm` progress bar.
"""
return tqdm(
iter,
desc=('<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc),
dynamic_ncols=True,
**kwargs)
|
Wrapper for `tqdm` progress bar which also sorts list of strings
def pbar_strings(files, desc='', **kwargs):
"""Wrapper for `tqdm` progress bar which also sorts list of strings
"""
return tqdm(
sorted(files, key=lambda s: s.lower()),
desc=('<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc),
dynamic_ncols=True,
**kwargs)
|
Get the task `priority` corresponding to the given `task_priority`.
If `task_priority` is an integer or 'None', return it.
If `task_priority` is a str, return the priority of the task it matches.
Otherwise, raise `ValueError`.
def _get_task_priority(tasks, task_priority):
"""Get the task `priority` corresponding to the given `task_priority`.
If `task_priority` is an integer or 'None', return it.
If `task_priority` is a str, return the priority of the task it matches.
Otherwise, raise `ValueError`.
"""
if task_priority is None:
return None
if is_integer(task_priority):
return task_priority
if isinstance(task_priority, basestring):
if task_priority in tasks:
return tasks[task_priority].priority
raise ValueError("Unrecognized task priority '{}'".format(task_priority))
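A sketch with a minimal stand-in for the Task objects (the real Task class lives elsewhere in the catalog code; only a priority attribute matters here, and the module-level helpers is_integer and the basestring compatibility alias are assumed to be available as in the original module):
from collections import namedtuple
Task = namedtuple('Task', ['priority'])  # stand-in for the real Task class
tasks = {'deleteoldevents': Task(priority=0), 'internal': Task(priority=20)}
_get_task_priority(tasks, None)        # -> None
_get_task_priority(tasks, 5)           # -> 5
_get_task_priority(tasks, 'internal')  # -> 20
_get_task_priority(tasks, 'missing')   # raises ValueError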
|
Run all of the import tasks.
This is executed by the 'scripts.main.py' when the module is run as an
executable. This can also be run as a method, in which case default
arguments are loaded, but can be overridden using `**kwargs`.
def import_data(self):
"""Run all of the import tasks.
This is executed by the 'scripts.main.py' when the module is run as an
executable. This can also be run as a method, in which case default
arguments are loaded, but can be overridden using `**kwargs`.
"""
tasks_list = self.load_task_list()
warnings.filterwarnings(
'ignore', r'Warning: converting a masked element to nan.')
# FIX
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Delete all old (previously constructed) output files
if self.args.delete_old:
self.log.warning("Deleting all old entry files.")
self.delete_old_entry_files()
# In update mode, load all entry stubs.
if self.args.load_stubs or self.args.update:
self.load_stubs()
if self.args.travis:
self.log.warning("Running in `travis` mode.")
prev_priority = 0
prev_task_name = ''
# for task, task_obj in tasks_list.items():
for task_name, task_obj in tasks_list.items():
if not task_obj.active:
continue
self.log.warning("Task: '{}'".format(task_name))
nice_name = task_obj.nice_name
mod_name = task_obj.module
func_name = task_obj.function
priority = task_obj.priority
# Make sure things are running in the correct order
if priority < prev_priority and priority > 0:
raise RuntimeError("Priority for '{}': '{}', less than prev,"
"'{}': '{}'.\n{}"
.format(task_name, priority, prev_task_name,
prev_priority, task_obj))
self.log.debug("\t{}, {}, {}, {}".format(nice_name, priority,
mod_name, func_name))
mod = importlib.import_module('.' + mod_name, package='astrocats')
self.current_task = task_obj
getattr(mod, func_name)(self)
num_events, num_stubs = self.count()
self.log.warning("Task finished. Events: {}, Stubs: {}".format(
num_events, num_stubs))
self.journal_entries()
num_events, num_stubs = self.count()
self.log.warning("Journal finished. Events: {}, Stubs: {}".format(
num_events, num_stubs))
prev_priority = priority
prev_task_name = task_name
process = psutil.Process(os.getpid())
memory = process.memory_info().rss
self.log.warning('Memory used (MBs): '
'{:,}'.format(memory / 1024. / 1024.))
return
|
Load the list of tasks in this catalog's 'input/tasks.json' file.
A `Task` object is created for each entry, with the parameters filled
in. These are placed in an OrderedDict, sorted by the `priority`
parameter, with positive values and then negative values,
e.g. [0, 2, 10, -10, -1].
def load_task_list(self):
"""Load the list of tasks in this catalog's 'input/tasks.json' file.
A `Task` object is created for each entry, with the parameters filled
in. These are placed in an OrderedDict, sorted by the `priority`
parameter, with positive values and then negative values,
e.g. [0, 2, 10, -10, -1].
"""
# In update mode, do not delete old files
if self.args.update:
self.log.info("Disabling `pre-delete` for 'update' mode.")
self.args.delete_old = False
# Don't allow both a 'min' and 'max' task priority
# FIX: this is probably unnecessary... having both could be useful
if ((self.args.min_task_priority is not None and
self.args.max_task_priority is not None)):
raise ValueError("Can only use *either* 'min' *or* 'max' priority")
# Load tasks data from input json file
tasks, task_names = self._load_task_list_from_file()
# Make sure 'active' modification lists are all valid
args_lists = [
self.args.args_task_list, self.args.yes_task_list,
self.args.no_task_list
]
args_names = ['--tasks', '--yes', '--no']
for arglist, lname in zip(args_lists, args_names):
if arglist is not None:
for tname in arglist:
if tname not in task_names:
raise ValueError(
"Value '{}' in '{}' list does not match"
" any tasks".format(tname, lname))
# Process min/max priority specification ('None' if none given)
min_priority = _get_task_priority(tasks, self.args.min_task_priority)
max_priority = _get_task_priority(tasks, self.args.max_task_priority)
task_groups = self.args.task_groups
if task_groups is not None:
if not isinstance(task_groups, list):
task_groups = [task_groups]
# Iterate over all tasks to determine which should be (in)active
# --------------------------------------------------------------
for key in tasks:
# If in update mode, only run update tasks
if self.args.update:
if not tasks[key].update:
tasks[key].active = False
# If specific list of tasks is given, make only those active
if self.args.args_task_list is not None:
if key in self.args.args_task_list:
tasks[key].active = True
else:
tasks[key].active = False
# Only run tasks above minimum priority
# (doesn't modify negative priority tasks)
if min_priority is not None and tasks[key].priority >= 0:
tasks[key].active = False
if tasks[key].priority >= min_priority:
tasks[key].active = True
# Only run tasks below maximum priority
# (doesn't modify negative priority tasks)
if max_priority is not None and tasks[key].priority >= 0:
tasks[key].active = False
if tasks[key].priority <= max_priority:
tasks[key].active = True
# Set 'yes' tasks to *active*
if self.args.yes_task_list is not None:
if key in self.args.yes_task_list:
tasks[key].active = True
# Set 'no' tasks to *inactive*
if self.args.no_task_list is not None:
if key in self.args.no_task_list:
tasks[key].active = False
# Set tasks in target 'groups' to *active*
if task_groups is not None and tasks[key].groups is not None:
# Go through each group defined in the command line
for given_group in task_groups:
# If this task is a member of any of those groups
if given_group in tasks[key].groups:
tasks[key].active = True
break
# Sort entries as positive values, then negative values
# [0, 1, 2, 2, 10, -100, -10, -1]
# Tuples are sorted by first element (here: '0' if positive), then
# second (here normal order)
tasks = OrderedDict(
sorted(
tasks.items(),
key=lambda t: (t[1].priority < 0, t[1].priority, t[1].name)))
# Find the first task that has "always_journal" set to True
for key in tasks:
if tasks[key].active and tasks[key].always_journal:
self.min_journal_priority = tasks[key].priority
break
names_act = []
names_inact = []
for key, val in tasks.items():
if val.active:
names_act.append(key)
else:
names_inact.append(key)
self.log.info("Active Tasks:\n\t" + ", ".join(nn for nn in names_act))
self.log.debug("Inactive Tasks:\n\t" + ", ".join(nn for nn in
names_inact))
return tasks
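The priority ordering described in the docstring (positive values ascending, then negative values ascending) comes from the (priority < 0, priority) part of the sort key, with the task name as the final tiebreaker; a plain-integer illustration:
sorted([0, -10, 2, -1, 10], key=lambda p: (p < 0, p))
# -> [0, 2, 10, -10, -1]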
|
Find an existing entry in, or add a new one to, the `entries` dict.
FIX: rename to `create_entry`???
Returns
-------
entries : OrderedDict of Entry objects
newname : str
Name of matching entry found in `entries`, or new entry added to
`entries`
def add_entry(self, name, load=True, delete=True):
"""Find an existing entry in, or add a new one to, the `entries` dict.
FIX: rename to `create_entry`???
Returns
-------
entries : OrderedDict of Entry objects
newname : str
Name of matching entry found in `entries`, or new entry added to
`entries`
"""
newname = self.clean_entry_name(name)
if not newname:
raise (ValueError('Fatal: Attempted to add entry with no name.'))
# If entry already exists, return
if newname in self.entries:
self.log.debug("`newname`: '{}' (name: '{}') already exists.".
format(newname, name))
# If this is a stub, we need to continue, possibly load file
if self.entries[newname]._stub:
self.log.debug("'{}' is a stub".format(newname))
# If a full (non-stub) event exists, return its name
else:
self.log.debug("'{}' is not a stub, returning".format(newname))
return newname
# If entry is alias of another entry in `entries`, find and return that
match_name = self.find_entry_name_of_alias(newname)
if match_name is not None:
self.log.debug(
"`newname`: '{}' (name: '{}') already exists as alias for "
"'{}'.".format(newname, name, match_name))
newname = match_name
# Load entry from file
if load:
loaded_name = self.load_entry_from_name(newname, delete=delete)
if loaded_name:
return loaded_name
# If we match an existing event, return that
if match_name is not None:
return match_name
# Create new entry
new_entry = self.proto(catalog=self, name=newname)
new_entry[self.proto._KEYS.SCHEMA] = self.SCHEMA.URL
self.log.log(self.log._LOADED,
"Created new entry for '{}'".format(newname))
# Add entry to dictionary
self.entries[newname] = new_entry
return newname
|
Return the first entry name with the given 'alias' included in its
list of aliases.
Returns
-------
name of matching entry (str) or 'None' if no matches
def find_entry_name_of_alias(self, alias):
"""Return the first entry name with the given 'alias' included in its
list of aliases.
Returns
-------
name of matching entry (str) or 'None' if no matches
"""
if alias in self.aliases:
name = self.aliases[alias]
if name in self.entries:
return name
else:
# Name wasn't found, possibly merged or deleted. Now look
# really hard.
for name, entry in self.entries.items():
aliases = entry.get_aliases(includename=False)
if alias in aliases:
if (ENTRY.DISTINCT_FROM not in entry or
alias not in entry[ENTRY.DISTINCT_FROM]):
return name
return None
|
Used by `merge_duplicates`
def copy_entry_to_entry(self,
fromentry,
destentry,
check_for_dupes=True,
compare_to_existing=True):
"""Used by `merge_duplicates`
"""
self.log.info("Copy entry object '{}' to '{}'".format(fromentry[
fromentry._KEYS.NAME], destentry[destentry._KEYS.NAME]))
newsourcealiases = {}
if self.proto._KEYS.SOURCES in fromentry:
for source in fromentry[self.proto._KEYS.SOURCES]:
alias = source.pop(SOURCE.ALIAS)
newsourcealiases[alias] = source
newmodelaliases = {}
if self.proto._KEYS.MODELS in fromentry:
for model in fromentry[self.proto._KEYS.MODELS]:
alias = model.pop(MODEL.ALIAS)
newmodelaliases[alias] = model
if self.proto._KEYS.ERRORS in fromentry:
for err in fromentry[self.proto._KEYS.ERRORS]:
destentry.setdefault(self.proto._KEYS.ERRORS, []).append(err)
for rkey in fromentry:
key = fromentry._KEYS.get_key_by_name(rkey)
if key.no_source:
continue
for item in fromentry[key]:
# isd = False
if 'source' not in item:
raise ValueError("Item has no source!")
nsid = []
for sid in item['source'].split(','):
if sid in newsourcealiases:
source = newsourcealiases[sid]
nsid.append(destentry.add_source(**source))
else:
raise ValueError("Couldn't find source alias!")
item['source'] = uniq_cdl(nsid)
if 'model' in item:
nmid = []
for mid in item['model'].split(','):
if mid in newmodelaliases:
model = newmodelaliases[mid]
nmid.append(destentry.add_model(**model))
else:
raise ValueError("Couldn't find model alias!")
item['model'] = uniq_cdl(nmid)
if key == ENTRY.PHOTOMETRY:
destentry.add_photometry(
compare_to_existing=compare_to_existing,
**item)
elif key == ENTRY.SPECTRA:
destentry.add_spectrum(
compare_to_existing=compare_to_existing,
**item)
elif key == ENTRY.ERRORS:
destentry.add_error(**item)
elif key == ENTRY.MODELS:
continue
else:
destentry.add_quantity(
compare_to_existing=compare_to_existing,
check_for_dupes=False, quantities=key, **item)
return
|
Merge and remove duplicate entries.
Compares each entry ('name') in `stubs` to all later entries to check
for duplicates in name or alias. If a duplicate is found, they are
merged and written to file.
def merge_duplicates(self):
"""Merge and remove duplicate entries.
Compares each entry ('name') in `stubs` to all later entries to check
for duplicates in name or alias. If a duplicate is found, they are
merged and written to file.
"""
if len(self.entries) == 0:
self.log.error("WARNING: `entries` is empty, loading stubs")
if self.args.update:
self.log.warning(
"No sources changed, entry files unchanged in update."
" Skipping merge.")
return
self.entries = self.load_stubs()
task_str = self.get_current_task_str()
keys = list(sorted(self.entries.keys()))
n1 = 0
mainpbar = tqdm(total=len(keys), desc=task_str)
while n1 < len(keys):
name1 = keys[n1]
if name1 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name1))
n1 = n1 + 1
mainpbar.update(1)
continue
allnames1 = set(self.entries[name1].get_aliases() + self.entries[
name1].extra_aliases())
# Search all later names
for name2 in keys[n1 + 1:]:
if name1 == name2:
continue
if name1 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name1))
continue
if name2 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name2))
continue
allnames2 = set(self.entries[name2].get_aliases() +
self.entries[name2].extra_aliases())
# If there are any common names or aliases, merge
if len(allnames1 & allnames2):
self.log.warning("Found two entries with common aliases "
"('{}' and '{}'), merging.".format(name1,
name2))
load1 = self.proto.init_from_file(self, name=name1)
load2 = self.proto.init_from_file(self, name=name2)
if load1 is not None and load2 is not None:
# Delete old files
self._delete_entry_file(entry=load1)
self._delete_entry_file(entry=load2)
self.entries[name1] = load1
self.entries[name2] = load2
priority1 = 0
priority2 = 0
for an in allnames1:
if an.startswith(self.entries[name1]
.priority_prefixes()):
priority1 += 1
for an in allnames2:
if an.startswith(self.entries[name2]
.priority_prefixes()):
priority2 += 1
if priority1 > priority2:
self.copy_to_entry_in_catalog(name2, name1)
keys.append(name1)
del self.entries[name2]
else:
self.copy_to_entry_in_catalog(name1, name2)
keys.append(name2)
del self.entries[name1]
else:
self.log.warning('Duplicate already deleted')
# if len(self.entries) != 1:
# self.log.error(
# "WARNING: len(entries) = {}, expected 1. "
# "Still journaling...".format(len(self.entries)))
self.journal_entries()
if self.args.travis and n1 > self.TRAVIS_QUERY_LIMIT:
break
n1 = n1 + 1
mainpbar.update(1)
mainpbar.close()
|
Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
def load_stubs(self, log_mem=False):
"""Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
"""
# Initialize parameter related to diagnostic output of memory usage
if log_mem:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
LOG_MEMORY_INT = 1000
MEMORY_LIMIT = 1000.0
def _add_stub_manually(_fname):
"""Create and add a 'stub' by manually loading parameters from
JSON files.
Previously this was done by creating a full `Entry` instance, then
using the `Entry.get_stub()` method to trim it down. This was very
slow and memory intensive, hence this improved approach.
"""
# FIX: should this be ``_fname.endswith('.gz')``?
fname = uncompress_gz(_fname) if '.gz' in _fname else _fname
stub = None
stub_name = None
with codecs.open(fname, 'r') as jfil:
# Load the full JSON file
data = json.load(jfil, object_pairs_hook=OrderedDict)
# Extract the top-level keys (should just be the name of the
# entry)
stub_name = list(data.keys())
# Make sure there is only a single top-level entry
if len(stub_name) != 1:
err = "json file '{}' has multiple keys: {}".format(
fname, list(stub_name))
self._log.error(err)
raise ValueError(err)
stub_name = stub_name[0]
# Make sure a non-stub entry doesn't already exist with this
# name
if stub_name in self.entries and not self.entries[
stub_name]._stub:
err_str = (
"ERROR: non-stub entry already exists with name '{}'"
.format(stub_name))
self.log.error(err_str)
raise RuntimeError(err_str)
# Remove the outermost dict level
data = data[stub_name]
# Create a new `Entry` (subclass) instance
proto = self.proto
stub = proto(catalog=self, name=stub_name, stub=True)
# Add stub parameters if they are available
if proto._KEYS.ALIAS in data:
stub[proto._KEYS.ALIAS] = data[proto._KEYS.ALIAS]
if proto._KEYS.DISTINCT_FROM in data:
stub[proto._KEYS.DISTINCT_FROM] = data[
proto._KEYS.DISTINCT_FROM]
if proto._KEYS.RA in data:
stub[proto._KEYS.RA] = data[proto._KEYS.RA]
if proto._KEYS.DEC in data:
stub[proto._KEYS.DEC] = data[proto._KEYS.DEC]
if proto._KEYS.DISCOVER_DATE in data:
stub[proto._KEYS.DISCOVER_DATE] = data[
proto._KEYS.DISCOVER_DATE]
if proto._KEYS.SOURCES in data:
stub[proto._KEYS.SOURCES] = data[
proto._KEYS.SOURCES]
# Store the stub
self.entries[stub_name] = stub
self.log.debug("Added stub for '{}'".format(stub_name))
currenttask = 'Loading entry stubs'
files = self.PATHS.get_repo_output_file_list()
for ii, _fname in enumerate(pbar(files, currenttask)):
# Run normally
# _add_stub(_fname)
# Run 'manually' (extract stub parameters directly from JSON)
_add_stub_manually(_fname)
if log_mem:
rss = process.memory_info().rss / 1024 / 1024
if ii % LOG_MEMORY_INT == 0 or rss > MEMORY_LIMIT:
log_memory(self.log, "\nLoaded stub {}".format(ii),
logging.INFO)
if rss > MEMORY_LIMIT:
err = (
"Memory usage {}, has exceeded {} on file {} '{}'".
format(rss, MEMORY_LIMIT, ii, _fname))
self.log.error(err)
raise RuntimeError(err)
return self.entries
|
Delete the file associated with the given entry.
def _delete_entry_file(self, entry_name=None, entry=None):
"""Delete the file associated with the given entry.
"""
if entry_name is None and entry is None:
raise RuntimeError("Either `entry_name` or `entry` must be given.")
elif entry_name is not None and entry is not None:
raise RuntimeError("Cannot use both `entry_name` and `entry`.")
if entry_name is not None:
entry = self.entries[entry_name]
else:
entry_name = entry[ENTRY.NAME]
# FIX: do we also need to check for gzipped files??
entry_filename = self.entry_filename(entry_name)
if self.args.write_entries:
self.log.info("Deleting entry file '{}' of entry '{}'".format(
entry_filename, entry_name))
if not os.path.exists(entry_filename):
self.log.error(
"Filename '{}' does not exist".format(entry_filename))
os.remove(entry_filename)
else:
self.log.debug("Not deleting '{}' because `write_entries`"
" is False".format(entry_filename))
return
|
Write all entries in `entries` to files, and clear. Depending on
arguments and `tasks`.
Iterates over all elements of `entries`, saving (possibly 'burying')
and deleting.
- If ``clear == True``, then each element of `entries` is deleted,
and a `stubs` entry is added
def journal_entries(self,
clear=True,
gz=False,
bury=False,
write_stubs=False,
final=False):
"""Write all entries in `entries` to files, and clear. Depending on
arguments and `tasks`.
Iterates over all elements of `entries`, saving (possibly 'burying')
and deleting.
- If ``clear == True``, then each element of `entries` is deleted,
and a `stubs` entry is added
"""
# if (self.current_task.priority >= 0 and
# self.current_task.priority < self.min_journal_priority):
# return
# Write it all out!
# NOTE: this needs to use a `list` wrapper to allow modification of
# dict
for name in list(self.entries.keys()):
if self.args.write_entries:
# If this is a stub and we aren't writing stubs, skip
if self.entries[name]._stub and not write_stubs:
continue
# Bury non-SN entries here if only claimed type is non-SN type,
# or if primary name starts with a non-SN prefix.
bury_entry = False
save_entry = True
if bury:
(bury_entry, save_entry) = self.should_bury(name)
if save_entry:
save_name = self.entries[name].save(
bury=bury_entry, final=final)
self.log.info(
"Saved {} to '{}'.".format(name.ljust(20), save_name))
if (gz and os.path.getsize(save_name) >
self.COMPRESS_ABOVE_FILESIZE):
save_name = compress_gz(save_name)
self.log.debug(
"Compressed '{}' to '{}'".format(name, save_name))
# FIX: use subprocess
outdir, filename = os.path.split(save_name)
filename = filename.split('.')[0]
os.system('cd ' + outdir + '; git rm --cached ' +
filename + '.json; git add -f ' + filename +
'.json.gz; cd ' + self.PATHS.PATH_BASE)
if clear:
self.entries[name] = self.entries[name].get_stub()
self.log.debug("Entry for '{}' converted to stub".format(name))
return
|
Choose the best name for each entry from its given name and its
possible aliases.
def set_preferred_names(self):
"""Choose between each entries given name and its possible aliases for
the best one.
"""
if len(self.entries) == 0:
self.log.error("WARNING: `entries` is empty, loading stubs")
self.load_stubs()
task_str = self.get_current_task_str()
for ni, oname in enumerate(pbar(self.entries, task_str)):
name = self.add_entry(oname)
self.entries[name].set_preferred_name()
if self.args.travis and ni > self.TRAVIS_QUERY_LIMIT:
break
return
|
Get a list of files which should be added to the given repository.
Notes
-----
* Finds files in the *root* of the given repository path.
* If `file_types` is given, only use those file types.
* If an uncompressed file is above the `size_limit`, it is compressed.
* If a compressed file is above the size limit, an error is raised
(if ``fail == True``) or it is skipped (if ``fail == False``).
Arguments
---------
repo : str
Path to repository
size_limit : scalar
fail : bool
Raise an error if a compressed file is still above the size limit.
file_types : list of str or None
Exclusive list of file types to add. 'None' to add all filetypes.
def _prep_git_add_file_list(self,
repo,
size_limit,
fail=True,
file_types=None):
"""Get a list of files which should be added to the given repository.
Notes
-----
* Finds files in the *root* of the given repository path.
* If `file_types` is given, only use those file types.
* If an uncompressed file is above the `size_limit`, it is compressed.
* If a compressed file is above the size limit, an error is raised
(if ``fail == True``) or it is skipped (if ``fail == False``).
Arguments
---------
repo : str
Path to repository
size_limit : scalar
fail : bool
Raise an error if a compressed file is still above the size limit.
file_types : list of str or None
Exclusive list of file types to add. 'None' to add all filetypes.
"""
add_files = []
if file_types is None:
file_patterns = ['*']
else:
self.log.error(
"WARNING: uncertain behavior with specified file types!")
file_patterns = ['*.' + ft for ft in file_types]
# Construct glob patterns for each file-type
file_patterns = [os.path.join(repo, fp) for fp in file_patterns]
for pattern in file_patterns:
file_list = glob(pattern)
for ff in file_list:
fsize = os.path.getsize(ff)
fname = str(ff)
comp_failed = False
# If the found file is too large
if fsize > size_limit:
self.log.debug("File '{}' size '{}' MB.".format(
fname, fsize / 1024 / 1024))
# If the file is already compressed... fail or skip
if ff.endswith('.gz'):
self.log.error(
"File '{}' is already compressed.".format(fname))
comp_failed = True
# Not yet compressed - compress it
else:
fname = compress_gz(fname)
fsize = os.path.getsize(fname)
self.log.info("Compressed to '{}', size '{}' MB".
format(fname, fsize / 1024 / 1024))
# If still too big, fail or skip
if fsize > size_limit:
comp_failed = True
# If compressed file is too large, skip file or raise error
if comp_failed:
# Raise an error
if fail:
raise RuntimeError(
"File '{}' cannot be added!".format(fname))
# Skip file without adding it
self.log.info("Skipping file.")
continue
# If everything is good, add file to list
add_files.append(fname)
return add_files
|
Load the given URL, or a cached-version.
Load page from url or cached file, depending on the current settings.
'archived' mode applies when `args.archived` is true (from
`--archived` CL argument), and when this task has `Task.archived`
also set to True.
'archived' mode:
* Try to load from cached file.
* If cache does not exist, try to load from web.
* If neither works, raise an error if ``fail == True``,
otherwise return None
non-'archived' mode:
* Try to load from url, save to cache file.
* If url fails, try to load existing cache file.
* If neither works, raise an error if ``fail == True``,
otherwise return None
'update' mode:
* In update mode, try to compare URL to cached file.
* If URL fails, return None
(cannot update)
* If URL data matches cached data, return None
(don't need to update)
* If URL is different from data, return url data
(proceed with update)
Arguments
---------
self
url : str
URL to download.
fname : str
Filename to which to save/load cached file. Includes suffix.
NOTE: in general, this should be the source's BIBCODE.
repo : str or None
The full path of the data-repository the cached file should be
saved/loaded from. If 'None', then the current task is used to
determine the repo.
timeout : int
Time (in seconds) after which a URL query should exit.
post : dict
List of arguments to post to URL when requesting it.
archived : bool
Load a previously archived version of the file.
fail : bool
If the file/url cannot be loaded, raise an error.
write : bool
Save a new copy of the cached file.
json_sort : str or None
If data is being saved to a json file, sort first by this str.
quiet : bool
Whether to emit error messages upon being unable to find files.
verify : bool
Whether to check for valid SSL cert when downloading
def load_url(self,
url,
fname,
repo=None,
timeout=120,
post=None,
fail=False,
write=True,
json_sort=None,
cache_only=False,
archived_mode=None,
archived_task=None,
update_mode=None,
verify=False):
"""Load the given URL, or a cached-version.
Load page from url or cached file, depending on the current settings.
'archived' mode applies when `args.archived` is true (from
`--archived` CL argument), and when this task has `Task.archived`
also set to True.
'archived' mode:
* Try to load from cached file.
* If cache does not exist, try to load from web.
* If neither works, raise an error if ``fail == True``,
otherwise return None
non-'archived' mode:
* Try to load from url, save to cache file.
* If url fails, try to load existing cache file.
* If neither works, raise an error if ``fail == True``,
otherwise return None
'update' mode:
* In update mode, try to compare URL to cached file.
* If URL fails, return None
(cannot update)
* If URL data matches cached data, return None
(don't need to update)
* If URL is different from data, return url data
(proceed with update)
Arguments
---------
self
url : str
URL to download.
fname : str
Filename to which to save/load cached file. Includes suffix.
NOTE: in general, this should be the source's BIBCODE.
repo : str or None
The full path of the data-repository the cached file should be
saved/loaded from. If 'None', then the current task is used to
determine the repo.
timeout : int
Time (in seconds) after which a URL query should exit.
post : dict
List of arguments to post to URL when requesting it.
archived : bool
Load a previously archived version of the file.
fail : bool
If the file/url cannot be loaded, raise an error.
write : bool
Save a new copy of the cached file.
json_sort : str or None
If data is being saved to a json file, sort first by this str.
quiet : bool
Whether to emit error messages upon being unable to find files.
verify : bool
Whether to check for valid SSL cert when downloading
"""
file_txt = None
url_txt = None
# Load default settings if needed
# -------------------------------
# Determine if we are running in archived mode
if archived_mode is None:
archived_mode = self.args.archived
# Determine if this task is one which uses archived files
if archived_task is None:
archived_task = self.current_task.archived
# Determine if running in update mode
if update_mode is None:
update_mode = self.args.update
# Construct the cached filename
if repo is None:
repo = self.get_current_task_repo()
cached_path = os.path.join(repo, fname)
# Load cached file if it exists
# ----------------------------
if os.path.isfile(cached_path):
with codecs.open(cached_path, 'r', encoding='utf8') as infile:
file_txt = infile.read()
self.log.debug("Task {}: Loaded from '{}'.".format(
self.current_task.name, cached_path))
# In `archived` mode and task - try to return the cached page
if archived_mode or (archived_task and not update_mode):
if file_txt is not None:
return file_txt
# If this flag is set, don't even attempt to download from web
if cache_only:
return None
# If file does not exist, log error, continue
else:
self.log.error("Task {}: Cached file '{}' does not exist.".
format(self.current_task.name, cached_path))
# Load url. 'None' is returned on failure - handle that below
url_txt = self.download_url(
url, timeout, fail=False, post=post, verify=verify)
# At this point, we might have both `url_txt` and `file_txt`
# If either of them failed, then they are set to None
# If URL download failed, error or return cached data
# ---------------------------------------------------
if url_txt is None:
# Both sources failed
if file_txt is None:
err_str = "Both url and file retrieval failed!"
# If we should raise errors on failure
if fail:
err_str += " `fail` set."
self.log.error(err_str)
raise RuntimeError(err_str)
# Otherwise warn and return None
self.log.warning(err_str)
return None
# Otherwise, if only url failed, return file data
else:
# If we are trying to update, but the url failed, then return
# None
if update_mode:
self.log.error(
"Cannot check for updates, url download failed.")
return None
# Otherwise, return file data
self.log.warning("URL download failed, using cached data.")
return file_txt
# Here: `url_txt` exists, `file_txt` may exist or may be None
# Determine if update should happen, and if file should be resaved
# Write new url_txt to cache file
# -------------------------------
if write:
self.log.info(
"Writing `url_txt` to file '{}'.".format(cached_path))
self._write_cache_file(url_txt, cached_path, json_sort=json_sort)
# If `file_txt` doesn't exist but we're not writing, warn
elif file_txt is None:
err_str = "Warning: cached file '{}' does not exist.".format(
cached_path)
err_str += " And is not being saved."
self.log.warning(err_str)
# Check if we need to update this data
# ------------------------------------
# If both `url_txt` and `file_txt` exist and update mode check MD5
if file_txt is not None and update_mode:
from hashlib import md5
url_md5 = md5(url_txt.encode('utf-8')).hexdigest()
file_md5 = md5(file_txt.encode('utf-8')).hexdigest()
self.log.debug("URL: '{}', File: '{}'.".format(url_md5, file_md5))
# If the data is the same, no need to parse (update), return None
if url_md5 == file_md5:
self.log.info(
"Skipping file '{}', no changes.".format(cached_path))
return None
else:
self.log.info("File '{}' has been updated".format(cached_path))
# Warn if we didn't save a new copy
if not write:
err_str = "Warning: updated data not saved to file."
self.log.warning(err_str)
return url_txt
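The update check above boils down to comparing MD5 digests of the cached text and the freshly downloaded text. A minimal standalone sketch of that idea (the function and variable names here are illustrative, not part of the catalog code):
from hashlib import md5

def texts_differ(old_txt, new_txt):
    """Return True when the two text blobs have different MD5 digests."""
    old_digest = md5(old_txt.encode('utf-8')).hexdigest()
    new_digest = md5(new_txt.encode('utf-8')).hexdigest()
    return old_digest != new_digest

print(texts_differ("a,b,c\n1,2,3\n", "a,b,c\n1,2,4\n"))  # -> True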
|
Download text from the given url.
Returns `None` on failure.
Arguments
---------
self
url : str
URL web address to download.
timeout : int
Duration after which URL request should terminate.
fail : bool
If `True`, then an error will be raised on failure.
If `False`, then 'None' is returned on failure.
post : dict
List of arguments to post to URL when requesting it.
verify : bool
Whether to check for valid SSL cert when downloading
Returns
-------
url_txt : str or None
On success the text of the url is returned. On failure `None` is
returned.
def download_url(self, url, timeout, fail=False, post=None, verify=True):
"""Download text from the given url.
Returns `None` on failure.
Arguments
---------
self
url : str
URL web address to download.
timeout : int
Duration after which URL request should terminate.
fail : bool
If `True`, then an error will be raised on failure.
If `False`, then 'None' is returned on failure.
post : dict
List of arguments to post to URL when requesting it.
verify : bool
Whether to check for valid SSL cert when downloading
Returns
-------
url_txt : str or None
On success the text of the url is returned. On failure `None` is
returned.
"""
_CODE_ERRORS = [500, 307, 404]
import requests
session = requests.Session()
try:
headers = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/39.0.2171.95 Safari/537.36'
}
if post:
response = session.post(
url,
timeout=timeout,
headers=headers,
data=post,
verify=verify)
else:
response = session.get(
url, timeout=timeout, headers=headers, verify=verify)
response.raise_for_status()
# Look for errors
for xx in response.history:
xx.raise_for_status()
if xx.status_code in _CODE_ERRORS:
err_msg = ("URL response returned status code '{}'"
.format(xx.status_code))
self.log.error(err_msg)
raise requests.HTTPError(err_msg)
url_txt = response.text
self.log.debug("Task {}: Loaded `url_txt` from '{}'.".format(
self.current_task.name, url))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as err:
err_str = ("URL Download of '{}' failed ('{}')."
.format(url, str(err)))
# Raise an error on failure
if fail:
err_str += " and `fail` is set."
self.log.error(err_str)
raise RuntimeError(err_str)
# Log a warning on error, and return None
else:
self.log.warning(err_str)
return None
return url_txt
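The method above wraps a fairly standard `requests` session; a stripped-down sketch of the same pattern, with a placeholder URL and a minimal User-Agent string (not from the catalog code):
import requests

session = requests.Session()
headers = {'User-Agent': 'Mozilla/5.0'}  # the real code uses a longer UA string
response = session.get('https://example.com/table.csv', timeout=60,
                       headers=headers, verify=True)
response.raise_for_status()  # raise on 4xx/5xx responses
print(len(response.text))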
|
Merge the source alias lists of two CatDicts.
def append_sources_from(self, other):
"""Merge the source alias lists of two CatDicts."""
# Get aliases lists from this `CatDict` and other
self_aliases = self[self._KEYS.SOURCE].split(',')
other_aliases = other[self._KEYS.SOURCE].split(',')
# Store alias to `self`
self[self._KEYS.SOURCE] = uniq_cdl(self_aliases + other_aliases)
return
|
Name of current action for progress-bar output.
The specific task string depends on the configuration via `args`.
Returns
-------
ctask : str
String representation of this task.
def current_task(self, args):
"""Name of current action for progress-bar output.
The specific task string depends on the configuration via `args`.
Returns
-------
ctask : str
String representation of this task.
"""
ctask = self.nice_name if self.nice_name is not None else self.name
if args is not None:
if args.update:
ctask = ctask.replace('%pre', 'Updating')
else:
ctask = ctask.replace('%pre', 'Loading')
return ctask
|
Whether previously archived data should be loaded.
def load_archive(self, args):
"""Whether previously archived data should be loaded.
"""
import warnings
warnings.warn("`Task.load_archive()` is deprecated! "
"`Catalog.load_url` handles the same functionality.")
return self.archived or args.archived
|
Use `git rev-parse HEAD <REPO>` to get current SHA.
def get_sha(path=None, log=None, short=False, timeout=None):
"""Use `git rev-parse HEAD <REPO>` to get current SHA.
"""
# git_command = "git rev-parse HEAD {}".format(repo_name).split()
# git_command = "git rev-parse HEAD".split()
git_command = ["git", "rev-parse"]
if short:
git_command.append("--short")
git_command.append("HEAD")
kwargs = {}
if path is not None:
kwargs['cwd'] = path
if timeout is not None:
kwargs['timeout'] = timeout
if log is not None:
log.debug("{} {}".format(git_command, str(kwargs)))
sha = subprocess.check_output(git_command, **kwargs)
try:
sha = sha.decode('ascii').strip()
except:
if log is not None:
log.debug("decode of '{}' failed".format(sha))
return sha
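A brief usage sketch, assuming it is run from inside any git working tree:
print(get_sha(path='.'))              # full SHA of HEAD
print(get_sha(path='.', short=True))  # abbreviated SHA via `git rev-parse --short HEAD`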
|
Add all files in each data repository tree, commit, push.
Creates a commit message based on the current catalog version info.
If either the `git add` or `git push` commands fail, an error will be
raised. Currently, if `commit` fails an error *WILL NOT* be raised,
because the `commit` command returns a nonzero exit status when
there are no files to add, and we don't want that to raise an error.
FIX: improve the error checking on this.
def git_add_commit_push_all_repos(cat):
"""Add all files in each data repository tree, commit, push.
Creates a commit message based on the current catalog version info.
If either the `git add` or `git push` commands fail, an error will be
raised. Currently, if `commit` fails an error *WILL NOT* be raised,
because the `commit` command returns a nonzero exit status when
there are no files to add, and we don't want that to raise an error.
FIX: improve the error checking on this.
"""
log = cat.log
log.debug("gitter.git_add_commit_push_all_repos()")
# Do not commit/push private repos
all_repos = cat.PATHS.get_all_repo_folders(private=False)
for repo in all_repos:
log.info("Repo in: '{}'".format(repo))
# Get the initial git SHA
sha_beg = get_sha(repo)
log.debug("Current SHA: '{}'".format(sha_beg))
# Get files that should be added, compress and check sizes
add_files = cat._prep_git_add_file_list(repo,
cat.COMPRESS_ABOVE_FILESIZE)
log.info("Found {} Files to add.".format(len(add_files)))
if len(add_files) == 0:
continue
try:
# Add all files in the repository directory tree
git_comm = ["git", "add"]
if cat.args.travis:
git_comm.append("-f")
git_comm.extend(add_files)
_call_command_in_repo(
git_comm, repo, cat.log, fail=True, log_flag=False)
# Commit these files
commit_msg = "'push' - adding all files."
commit_msg = "{} : {}".format(cat._version_long, commit_msg)
log.info(commit_msg)
git_comm = ["git", "commit", "-am", commit_msg]
_call_command_in_repo(git_comm, repo, cat.log)
# Add all files in the repository directory tree
git_comm = ["git", "push"]
if not cat.args.travis:
_call_command_in_repo(git_comm, repo, cat.log, fail=True)
except Exception as err:
try:
git_comm = ["git", "reset", "HEAD"]
_call_command_in_repo(git_comm, repo, cat.log, fail=True)
except:
pass
raise err
return
|
Perform a 'git pull' in each data repository.
> `git pull -s recursive -X theirs`
def git_pull_all_repos(cat, strategy_recursive=True, strategy='theirs'):
"""Perform a 'git pull' in each data repository.
> `git pull -s recursive -X theirs`
"""
# raise RuntimeError("THIS DOESNT WORK YET!")
log = cat.log
log.debug("gitter.git_pull_all_repos()")
log.warning("WARNING: using experimental `git_pull_all_repos()`!")
all_repos = cat.PATHS.get_all_repo_folders()
for repo_name in all_repos:
log.info("Repo in: '{}'".format(repo_name))
# Get the initial git SHA
sha_beg = get_sha(repo_name)
log.debug("Current SHA: '{}'".format(sha_beg))
# Initialize the git repository
repo = git.Repo(repo_name)
# Construct the command to call
git_comm = "git pull --verbose"
if strategy_recursive:
git_comm += " -s recursive"
if strategy is not None:
git_comm += " -X {:s}".format(strategy)
log.debug("Calling '{}'".format(git_comm))
# Call git command (do this manually to use desired options)
# Set `with_exceptions=False` to handle errors ourselves (below)
code, out, err = repo.git.execute(
git_comm.split(),
with_stdout=True,
with_extended_output=True,
with_exceptions=False)
# Handle output of git command
if len(out):
log.info(out)
if len(err):
log.info(err)
# Handle error codes
if code != 0:
err_str = "Command '{}' returned exit code '{}'!".format(git_comm,
code)
err_str += "\n\tout: '{}'\n\terr: '{}'".format(out, err)
log.error(err_str)
raise RuntimeError(err_str)
sha_end = get_sha(repo_name)
if sha_end != sha_beg:
log.info("Updated SHA: '{}'".format(sha_end))
return
|
Perform a 'git clone' for each data repository that doesn't exist.
def git_clone_all_repos(cat):
"""Perform a 'git clone' for each data repository that doesnt exist.
"""
log = cat.log
log.debug("gitter.git_clone_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
out_repos = cat.PATHS.get_repo_output_folders()
for repo in all_repos:
log.info("Repo in: '{}'".format(repo))
if os.path.isdir(repo):
log.info("Directory exists.")
else:
log.debug("Cloning directory...")
clone(repo, cat.log, depth=max(cat.args.clone_depth, 1))
if cat.args.purge_outputs and repo in out_repos:
for fil in glob(os.path.join(repo, '*.json')):
os.remove(fil)
grepo = git.cmd.Git(repo)
try:
grepo.status()
except git.GitCommandError:
log.error("Repository does not exist!")
raise
# Get the initial git SHA
sha_beg = get_sha(repo)
log.debug("Current SHA: '{}'".format(sha_beg))
return
|
Perform a 'git reset' in each data repository.
def git_reset_all_repos(cat, hard=True, origin=False, clean=True):
"""Perform a 'git reset' in each data repository.
"""
log = cat.log
log.debug("gitter.git_reset_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
for repo in all_repos:
log.warning("Repo in: '{}'".format(repo))
# Get the initial git SHA
sha_beg = get_sha(repo)
log.debug("Current SHA: '{}'".format(sha_beg))
grepo = git.cmd.Git(repo)
# Fetch first
log.info("fetching")
grepo.fetch()
args = []
if hard:
args.append('--hard')
if origin:
args.append('origin/master')
log.info("resetting")
retval = grepo.reset(*args)
if len(retval):
log.warning("Git says: '{}'".format(retval))
# Clean
if clean:
log.info("cleaning")
# [q]uiet, [f]orce, [d]irectories
retval = grepo.clean('-qdf')
if len(retval):
log.warning("Git says: '{}'".format(retval))
sha_end = get_sha(repo)
if sha_end != sha_beg:
log.debug("Updated SHA: '{}'".format(sha_end))
return
|
Perform a 'git status' in each data repository.
def git_status_all_repos(cat, hard=True, origin=False, clean=True):
"""Perform a 'git status' in each data repository.
"""
log = cat.log
log.debug("gitter.git_status_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
for repo_name in all_repos:
log.info("Repo in: '{}'".format(repo_name))
# Get the initial git SHA
sha_beg = get_sha(repo_name)
log.debug("Current SHA: '{}'".format(sha_beg))
log.info("Fetching")
fetch(repo_name, log=cat.log)
git_comm = ["git", "status"]
_call_command_in_repo(
git_comm, repo_name, cat.log, fail=True, log_flag=True)
sha_end = get_sha(repo_name)
if sha_end != sha_beg:
log.info("Updated SHA: '{}'".format(sha_end))
return
|
Clone a single data repository if it does not already exist.
Should be called from the subclassed `Catalog` objects, passed the
path of a specific repository.
Arguments
---------
repo : str
*Absolute* path specification of the target repository.
log : `logging.Logger` object
Logger instance for status messages.
depth : int
`git clone` depth; values above zero produce a shallow clone.
def clone(repo, log, depth=1):
"""Given a list of repositories, make sure they're all cloned.
Should be called from the subclassed `Catalog` objects, passed a list
of specific repository names.
Arguments
---------
all_repos : list of str
*Absolute* path specification of each target repository.
"""
kwargs = {}
if depth > 0:
kwargs['depth'] = depth
try:
repo_name = os.path.split(repo)[-1]
repo_name = "https://github.com/astrocatalogs/" + repo_name + ".git"
log.warning("Cloning '{}' (only needs to be done ".format(repo) +
"once, may take few minutes per repo).")
grepo = git.Repo.clone_from(repo_name, repo, **kwargs)
except:
log.error("CLONING '{}' INTERRUPTED".format(repo))
raise
return grepo
|
Use `subprocess` to call a command in a certain (repo) directory.
Logs the output (both `stderr` and `stdout`) to the log, and checks the
return codes to make sure they're valid. Raises error if not.
Raises
------
exception `subprocess.CalledProcessError`: if the command fails
def _call_command_in_repo(comm, repo, log, fail=False, log_flag=True):
"""Use `subprocess` to call a command in a certain (repo) directory.
Logs the output (both `stderr` and `stdout`) to the log, and checks the
return codes to make sure they're valid. Raises error if not.
Raises
------
exception `subprocess.CalledProcessError`: if the command fails
"""
if log_flag:
log.debug("Running '{}'.".format(" ".join(comm)))
process = subprocess.Popen(
comm, cwd=repo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if stderr is not None:
err_msg = stderr.decode('ascii').strip().splitlines()
for em in err_msg:
log.error(em)
if stdout is not None:
out_msg = stdout.decode('ascii').strip().splitlines()
for om in out_msg:
log.warning(om)
# Raises an error if the command failed.
if fail:
if process.returncode:
raise subprocess.CalledProcessError(process.returncode, comm)
return
|
Check that spectrum has legal combination of attributes.
def _check(self):
"""Check that spectrum has legal combination of attributes."""
# Run the super method
super(Spectrum, self)._check()
err_str = None
has_data = self._KEYS.DATA in self
has_wave = self._KEYS.WAVELENGTHS in self
has_flux = self._KEYS.FLUXES in self
has_filename = self._KEYS.FILENAME in self
if not has_data:
if (not has_wave or not has_flux) and not has_filename:
err_str = (
"If `{}` not given".format(self._KEYS.DATA) +
"; `{}` or `{}` needed".format(
self._KEYS.WAVELENGTHS, self._KEYS.FLUXES))
if err_str is not None:
raise ValueError(err_str)
return
|
Check if spectrum is duplicate of another.
def is_duplicate_of(self, other):
"""Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other):
return True
row_matches = 0
for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
lambda1, flux1 = tuple(row[0:2])
if (self._KEYS.DATA not in other or
ri >= len(other[self._KEYS.DATA])):
break
lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
minlambdalen = min(len(lambda1), len(lambda2))
minfluxlen = min(len(flux1), len(flux2))
if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
float(flux1[:minfluxlen + 1]) != 0.0):
row_matches += 1
# Five row matches should be enough to be sure spectrum is a dupe.
if row_matches >= 5:
return True
# Matches need to happen in the first 10 rows.
if ri >= 10:
break
return False
|
Logic for sorting keys in a `Spectrum` relative to one another.
def sort_func(self, key):
"""Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.DATA:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
return key
|
Sorting logic for `Quantity` objects.
def sort_func(self, key):
"""Sorting logic for `Quantity` objects."""
if key == self._KEYS.VALUE:
return 'aaa'
if key == self._KEYS.SOURCE:
return 'zzz'
return key
|
Return this class's attribute names (those not starting with '_').
Also retrieves the attributes from base classes, e.g.
For: ``ENTRY(KeyCollection)``, ``ENTRY.keys()`` gives just the
attributes of `ENTRY` (`KeyCollection` has no keys).
For: ``SUPERNOVA(ENTRY)``, ``SUPERNOVA.keys()`` gives both the
attributes of `SUPERNOVA` itself, and of `ENTRY`.
Returns
-------
_keys : list of str
List of names of internal attributes. Order is effectively random.
def keys(cls):
"""Return this class's attribute names (those not stating with '_').
Also retrieves the attributes from base classes, e.g.
For: ``ENTRY(KeyCollection)``, ``ENTRY.keys()`` gives just the
attributes of `ENTRY` (`KeyCollection` has no keys).
For: ``SUPERNOVA(ENTRY)``, ``SUPERNOVA.keys()`` gives both the
attributes of `SUPERNOVAE` itself, and of `ENTRY`.
Returns
-------
_keys : list of str
List of names of internal attributes. Order is effectiely random.
"""
if cls._keys:
return cls._keys
# If `_keys` is not yet defined, create it
# ----------------------------------------
_keys = []
# Get the keys from all base classes as well (when this is subclassed)
for mro in cls.__bases__:
# Base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_keys.extend(mro.keys())
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_keys.extend([
kk for kk in vars(cls).keys()
if not kk.startswith('_') and not callable(getattr(cls, kk))
])
# Store for future retrieval
cls._keys = _keys
return cls._keys
|
Return this class's attribute values (those not starting with '_').
Returns
-------
_vals : list of objects
List of values of internal attributes. Order is effectively random.
def vals(cls):
"""Return this class's attribute values (those not stating with '_').
Returns
-------
_vals : list of objects
List of values of internal attributes. Order is effectiely random.
"""
if cls._vals:
return cls._vals
# If `_vals` is not yet defined, create it
# ----------------------------------------
_vals = []
# Get the keys from all base classes as well (when this is subclassed)
for mro in cls.__bases__:
# Base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_vals.extend(mro.vals())
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_vals.extend([
vv for kk, vv in vars(cls).items()
if not kk.startswith('_') and not callable(getattr(cls, kk))
])
# Store for future retrieval
cls._vals = _vals
return cls._vals
|
Return this class's attribute values (those not starting with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
def compare_vals(cls, sort=True):
"""Return this class's attribute values (those not stating with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
"""
if cls._compare_vals:
return cls._compare_vals
# If `_compare_vals` is not yet defined, create it
# ----------------------------------------
_compare_vals = []
# Get the keys from all base classes as well (when this is subclassed)
for mro in cls.__bases__:
# Base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_compare_vals.extend(mro.compare_vals(sort=False))
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_compare_vals.extend([
vv for kk, vv in vars(cls).items()
if (not kk.startswith('_') and not callable(getattr(cls, kk)) and
vv.compare)
])
# Sort keys based on priority, high priority values first
if sort:
_compare_vals = sorted(
_compare_vals,
reverse=True,
key=lambda key: (key.priority, key.name))
# Store for future retrieval
cls._compare_vals = _compare_vals
return cls._compare_vals
|
Return a 'pretty' string representation of this `Key`.
note: do not override the builtin `__str__` or `__repr__` methods!
def pretty(self):
"""Return a 'pretty' string representation of this `Key`.
note: do not override the builtin `__str__` or `__repr__` methods!
"""
retval = ("Key(name={}, type={}, listable={}, compare={}, "
"priority={}, kind_preference={}, "
"replace_better={})").format(
self.name, self.type, self.listable, self.compare,
self.priority, self.kind_preference, self.replace_better)
return retval
|
Make sure given value is consistent with this `Key` specification.
NOTE: if `type` is 'None', then `listable` also is *not* checked.
def check(self, val):
"""Make sure given value is consistent with this `Key` specification.
NOTE: if `type` is 'None', then `listable` also is *not* checked.
"""
# If there is no `type` requirement, everything is allowed
if self.type is None:
return True
is_list = isinstance(val, list)
# If lists are not allowed, and this is a list --> false
if not self.listable and is_list:
return False
# `is_number` already checks for either list or single value
if self.type == KEY_TYPES.NUMERIC and not is_number(val):
return False
elif (self.type == KEY_TYPES.TIME and
not is_number(val) and '-' not in val and '/' not in val):
return False
elif self.type == KEY_TYPES.STRING:
# If its a list, check first element
if is_list:
if not isinstance(val[0], basestring):
return False
# Otherwise, check it
elif not isinstance(val, basestring):
return False
elif self.type == KEY_TYPES.BOOL:
if is_list and not isinstance(val[0], bool):
return False
elif not isinstance(val, bool):
return False
return True
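A standalone illustration of the NUMERIC branch of the check above (this is a small mimic for demonstration, not the astrocats `Key` class itself):
def check_numeric(val):
    # Accept a number-like value/string, or a list of them, mirroring the
    # `is_number` usage in the method above.
    vals = val if isinstance(val, list) else [val]
    try:
        [float(v) for v in vals]
        return True
    except (TypeError, ValueError):
        return False

print(check_numeric('0.05'), check_numeric(['1', '2']), check_numeric('n/a'))
# -> True True False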
|
Create a standard logger object which logs to file and/or stdout stream.
If a logger has already been created in this session, it is returned
(unless `name` is given).
Arguments
---------
name : str,
Handle for this logger, must be distinct for a distinct logger.
stream_fmt : str or `None`,
Format of log messages to stream (stdout). If `None`, default settings
are used.
file_fmt : str or `None`,
Format of log messages to file. If `None`, default settings are used.
date_fmt : str or `None`
Format of time stamps to stream and/or file. If `None`, default
settings are used.
stream_level : int,
Logging level for stream.
file_level : int,
Logging level for file.
tofile : str or `None`,
Filename to log to (turned off if `None`).
tostr : bool,
Log to stdout stream.
Returns
-------
logger : ``logging.Logger`` object,
Logger object to use for logging.
def get_logger(name=None, stream_fmt=None, file_fmt=None, date_fmt=None,
stream_level=None, file_level=None,
tofile=None, tostr=True):
"""Create a standard logger object which logs to file and or stdout stream.
If a logger has already been created in this session, it is returned
(unless `name` is given).
Arguments
---------
name : str,
Handle for this logger, must be distinct for a distinct logger.
stream_fmt : str or `None`,
Format of log messages to stream (stdout). If `None`, default settings
are used.
file_fmt : str or `None`,
Format of log messages to file. If `None`, default settings are used.
date_fmt : str or `None`
Format of time stamps to stream and/or file. If `None`, default
settings are used.
stream_level : int,
Logging level for stream.
file_level : int,
Logging level for file.
tofile : str or `None`,
Filename to log to (turned off if `None`).
tostr : bool,
Log to stdout stream.
Returns
-------
logger : ``logging.Logger`` object,
Logger object to use for logging.
"""
if tofile is None and not tostr:
raise ValueError(
"Must log to something: `tofile` or `tostr` must be `True`.")
logger = logging.getLogger(name)
# Add a custom attribute to this `logger` so that we know when an existing
# one is being returned
if hasattr(logger, '_OSC_LOGGER'):
return logger
else:
logger._OSC_LOGGER = True
# Set other custom parameters
logger._LOADED = _LOADED_LEVEL
# Make sure handlers don't get duplicated (ipython issue)
while len(logger.handlers) > 0:
logger.handlers.pop()
# Prevent messages from also propagating to ancestor loggers (avoids duplicates)
logger.propagate = 0
# Determine and Set Logging Levels
if file_level is None:
file_level = _FILE_LEVEL_DEF
if stream_level is None:
stream_level = _STREAM_LEVEL_DEF
# Logger object must be at minimum level
logger.setLevel(int(np.min([file_level, stream_level])))
if date_fmt is None:
date_fmt = '%Y/%m/%d %H:%M:%S'
# Log to file
# -----------
if tofile is not None:
if file_fmt is None:
file_fmt = "%(asctime)s %(levelname)8.8s [%(filename)20.20s:"
file_fmt += "%(funcName)-20.20s]%(indent)s%(message)s"
fileFormatter = IndentFormatter(file_fmt, datefmt=date_fmt)
fileHandler = logging.FileHandler(tofile, 'w')
fileHandler.setFormatter(fileFormatter)
fileHandler.setLevel(file_level)
logger.addHandler(fileHandler)
# Store output filename to `logger` object
logger.filename = tofile
# Log To stdout
# -------------
if tostr:
if stream_fmt is None:
stream_fmt = "%(indent)s%(message)s"
strFormatter = IndentFormatter(stream_fmt, datefmt=date_fmt)
strHandler = logging.StreamHandler()
strHandler.setFormatter(strFormatter)
strHandler.setLevel(stream_level)
logger.addHandler(strHandler)
return logger
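A brief usage sketch (the logger name and filename are illustrative; the default level constants are defined elsewhere in this module):
log = get_logger(name='example', tofile='example.log', tostr=True)
log.warning('This should reach both the file and stdout under typical default levels.')
log.debug('Whether this appears depends on the configured stream/file levels.')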
|
Log an error message and raise an error.
Arguments
---------
log : `logging.Logger` object
err_str : str
Error message to be logged and raised.
err_type : `Exception` object
Type of error to raise.
def log_raise(log, err_str, err_type=RuntimeError):
"""Log an error message and raise an error.
Arguments
---------
log : `logging.Logger` object
err_str : str
Error message to be logged and raised.
err_type : `Exception` object
Type of error to raise.
"""
log.error(err_str)
# Make sure output is flushed
# (happens automatically to `StreamHandlers`, but not `FileHandlers`)
for handle in log.handlers:
handle.flush()
# Raise given error
raise err_type(err_str)
|
Log the current memory usage.
def log_memory(log, pref=None, lvl=logging.DEBUG, raise_flag=True):
"""Log the current memory usage.
"""
import os
import sys
cyc_str = ""
KB = 1024.0
if pref is not None:
cyc_str += "{}: ".format(pref)
# Linux reports `ru_maxrss` in kilobytes; OSX in bytes
UNIT = KB*KB if sys.platform == 'darwin' else KB
good = False
# Use the `resource` module to check the maximum memory usage of this process
try:
import resource
max_self = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
max_child = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
_str = "RSS Max Self: {:7.2f} [MB], Child: {:7.2f} [MB]".format(
max_self/UNIT, max_child/UNIT)
cyc_str += _str
except Exception as err:
log.log(lvl, "resource.getrusage failed. '{}'".format(str(err)))
if raise_flag:
raise
else:
good = True
# Use the `psutil` module to check the current memory/cpu usage of this process
try:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
cpu_perc = process.cpu_percent()
mem_perc = process.memory_percent()
num_thr = process.num_threads()
_str = "; RSS: {:7.2f} [MB], {:7.2f}%; Threads: {:3d}, CPU: {:7.2f}%".format(
rss/UNIT, mem_perc, num_thr, cpu_perc)
cyc_str += _str
except Exception as err:
log.log(lvl, "psutil.Process failed. '{}'".format(str(err)))
if raise_flag:
raise
else:
good = True
if good:
log.log(lvl, cyc_str)
return
|
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
def doblob(morphed, blobdet, img, anno=True):
"""
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
"""
keypoints = blobdet.detect(morphed)
nkey = len(keypoints)
kpsize = asarray([k.size for k in keypoints])
final = img.copy() # is the .copy necessary?
final = cv2.drawKeypoints(img, keypoints, outImage=final,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# %% plot count of blobs
if anno:
cv2.putText(final, text=str(nkey), org=(int(img.shape[1]*.9), 25),
fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=2,
color=(0, 255, 0), thickness=2)
return final, nkey, kpsize
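A hedged usage sketch (the image path is a placeholder; assumes OpenCV 3+ where `SimpleBlobDetector_create` is available):
import cv2

img = cv2.imread('frame.png')                        # BGR image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
detector = cv2.SimpleBlobDetector_create()
annotated, nblobs, sizes = doblob(mask, detector, img)
print(nblobs)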
|
Parse arguments and return configuration settings.
def load_args(self, args, clargs):
"""Parse arguments and return configuration settings.
"""
# Parse All Arguments
args = self.parser.parse_args(args=clargs, namespace=args)
# Print the help information if no subcommand is given
# subcommand is required for operation
if args.subcommand is None:
self.parser.print_help()
args = None
return args
|
Create `argparse` instance, and setup with appropriate parameters.
def _setup_argparse(self):
"""Create `argparse` instance, and setup with appropriate parameters.
"""
parser = argparse.ArgumentParser(
prog='catalog', description='Parent Catalog class for astrocats.')
subparsers = parser.add_subparsers(
description='valid subcommands', dest='subcommand')
# Data Import
# -----------
# Add the 'import' command, and related arguments
self._add_parser_arguments_import(subparsers)
# Git Subcommands
# ---------------
self._add_parser_arguments_git(subparsers)
# Analyze Catalogs
# ----------------
# Add the 'analyze' command, and related arguments
self._add_parser_arguments_analyze(subparsers)
return parser
|
Create parser for 'import' subcommand, and associated arguments.
def _add_parser_arguments_import(self, subparsers):
"""Create parser for 'import' subcommand, and associated arguments.
"""
import_pars = subparsers.add_parser(
"import", help="Import data.")
import_pars.add_argument(
'--update', '-u', dest='update',
default=False, action='store_true',
help='Only update catalog using live sources.')
import_pars.add_argument(
'--load-stubs', dest='load_stubs',
default=False, action='store_true',
help='Load stubs before running.')
import_pars.add_argument(
'--archived', '-a', dest='archived',
default=False, action='store_true',
help='Always use task caches.')
# Control which 'tasks' are executed
# ----------------------------------
import_pars.add_argument(
'--tasks', dest='args_task_list', nargs='*', default=None,
help='space delimited list of tasks to perform.')
import_pars.add_argument(
'--yes', dest='yes_task_list', nargs='+', default=None,
help='space delimited list of tasks to turn on.')
import_pars.add_argument(
'--no', dest='no_task_list', nargs='+', default=None,
help='space delimited list of tasks to turn off.')
import_pars.add_argument(
'--min-task-priority', dest='min_task_priority',
default=None,
help='minimum priority for a task to run')
import_pars.add_argument(
'--max-task-priority', dest='max_task_priority',
default=None,
help='maximum priority for a task to run')
import_pars.add_argument(
'--task-groups', dest='task_groups',
default=None,
help='predefined group(s) of tasks to run.')
return import_pars
|
Create a sub-parsers for git subcommands.
def _add_parser_arguments_git(self, subparsers):
"""Create a sub-parsers for git subcommands.
"""
subparsers.add_parser(
"git-clone",
help="Clone all defined data repositories if they dont exist.")
subparsers.add_parser(
"git-push",
help="Add all files to data repositories, commit, and push.")
subparsers.add_parser(
"git-pull",
help="'Pull' all data repositories.")
subparsers.add_parser(
"git-reset-local",
help="Hard reset all data repositories using local 'HEAD'.")
subparsers.add_parser(
"git-reset-origin",
help="Hard reset all data repositories using 'origin/master'.")
subparsers.add_parser(
"git-status",
help="Get the 'git status' of all data repositories.")
return
|
Create a parser for the 'analyze' subcommand.
def _add_parser_arguments_analyze(self, subparsers):
"""Create a parser for the 'analyze' subcommand.
"""
lyze_pars = subparsers.add_parser(
"analyze",
help="Perform basic analysis on this catalog.")
lyze_pars.add_argument(
'--count', '-c', dest='count',
default=False, action='store_true',
help='Determine counts of entries, files, etc.')
return lyze_pars
|
Compress the file with the given name and delete the uncompressed file.
The compressed filename is simply the input filename with '.gz' appended.
Arguments
---------
fname : str
Name of the file to compress and delete.
Returns
-------
comp_fname : str
Name of the compressed file produced. Equal to `fname + '.gz'`.
def compress_gz(fname):
"""Compress the file with the given name and delete the uncompressed file.
The compressed filename is simply the input filename with '.gz' appended.
Arguments
---------
fname : str
Name of the file to compress and delete.
Returns
-------
comp_fname : str
Name of the compressed file produced. Equal to `fname + '.gz'`.
"""
import shutil
import gzip
comp_fname = fname + '.gz'
with codecs.open(fname, 'rb') as f_in, gzip.open(
comp_fname, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(fname)
return comp_fname
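For example, compressing a throwaway temporary file (paths are generated by `tempfile`):
import os
import tempfile

tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
tmp.write('a,b,c\n1,2,3\n')
tmp.close()
comp = compress_gz(tmp.name)
print(comp, os.path.exists(tmp.name))  # -> '<tmpname>.csv.gz' False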
|
draws flow vectors on image
this came from opencv/examples directory
another way: http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
def draw_flow(img, flow, step=16, dtype=uint8):
"""
draws flow vectors on image
this came from opencv/examples directory
another way: http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
"""
maxval = iinfo(img.dtype).max
# scaleFact = 1. #arbitary factor to make flow visible
canno = (0, maxval, 0) # green color
h, w = img.shape[:2]
y, x = mgrid[step//2:h:step, step//2:w:step].reshape(2, -1)
fx, fy = flow[y, x].T
# create line endpoints
lines = vstack([x, y, (x+fx), (y+fy)]).T.reshape(-1, 2, 2)
lines = int32(lines + 0.5)
# create image
if img.ndim == 2: # assume gray
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else: # already RGB
vis = img
# draw line
cv2.polylines(vis, lines, isClosed=False, color=canno, thickness=1, lineType=8)
# draw filled green circles
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, center=(x1, y1), radius=1, color=canno, thickness=-1)
return vis
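A hedged usage sketch with Farneback dense optical flow (the frame file names are placeholders):
import cv2

prev = cv2.imread('frame0.png', cv2.IMREAD_GRAYSCALE)
curr = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)
flow = cv2.calcOpticalFlowFarneback(prev, curr, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
vis = draw_flow(curr, flow, step=16)
cv2.imwrite('flow_vis.png', vis)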
|
mag must be uint8, uint16, or uint32, and 2-D
ang is in radians (float)
def draw_hsv(mag, ang, dtype=uint8, fn=None):
"""
mag must be uint8, uint16, or uint32, and 2-D
ang is in radians (float)
"""
assert mag.shape == ang.shape
assert mag.ndim == 2
maxval = iinfo(dtype).max
hsv = dstack(((degrees(ang)/2).astype(dtype), # /2 to keep less than 255
ones_like(mag)*maxval, # maxval must be after in 1-D case
cv2.normalize(mag, None, alpha=0, beta=maxval, norm_type=cv2.NORM_MINMAX)))
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
if fn is not None:
print('writing ' + fn)
cv2.imwrite(fn, rgb)
return rgb
|
flow dimensions y,x,2 3-D. flow[...,0] is magnitude, flow[...,1] is angle
def flow2magang(flow, dtype=uint8):
"""
flow dimensions y,x,2 3-D. flow[...,0] is magnitude, flow[...,1] is angle
"""
fx, fy = flow[..., 0], flow[..., 1]
return hypot(fx, fy).astype(dtype), arctan2(fy, fx) + pi
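Continuing the optical-flow sketch above: convert the dense flow field into an HSV visualization where hue encodes direction and brightness encodes speed (the output filename is a placeholder):
mag, ang = flow2magang(flow)                 # uint8 magnitude, float angle in radians
rgb = draw_hsv(mag, ang, fn='flow_hsv.png')  # also writes the image when `fn` is given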
|
dir
One of IOC_NONE, IOC_WRITE, IOC_READ, or IOC_READ|IOC_WRITE.
Direction is from the application's point of view, not kernel's.
size (14-bit unsigned integer)
Size of the buffer passed to ioctl's "arg" argument.
def IOC(dir, type, nr, size):
"""
dir
One of IOC_NONE, IOC_WRITE, IOC_READ, or IOC_READ|IOC_WRITE.
Direction is from the application's point of view, not kernel's.
size (14-bit unsigned integer)
Size of the buffer passed to ioctl's "arg" argument.
"""
assert dir <= _IOC_DIRMASK, dir
assert type <= _IOC_TYPEMASK, type
assert nr <= _IOC_NRMASK, nr
assert size <= _IOC_SIZEMASK, size
return (dir << _IOC_DIRSHIFT) | (type << _IOC_TYPESHIFT) | (nr << _IOC_NRSHIFT) | (size << _IOC_SIZESHIFT)
|
Returns the size of given type, and check its suitability for use in an
ioctl command number.
def IOC_TYPECHECK(t):
"""
Returns the size of given type, and check its suitability for use in an
ioctl command number.
"""
result = ctypes.sizeof(t)
assert result <= _IOC_SIZEMASK, result
return result
|
An ioctl with read parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
def IOR(type, nr, size):
"""
An ioctl with read parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
"""
return IOC(IOC_READ, type, nr, IOC_TYPECHECK(size))
|
An ioctl with write parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
def IOW(type, nr, size):
"""
An ioctl with write parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
"""
return IOC(IOC_WRITE, type, nr, IOC_TYPECHECK(size))
|
An ioctl with both read and write parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
def IOWR(type, nr, size):
"""
An ioctl with both read and write parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
"""
return IOC(IOC_READ | IOC_WRITE, type, nr, IOC_TYPECHECK(size))
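A minimal sketch of composing request numbers with the helpers above, for a hypothetical device type 'X' and a small ctypes struct argument (names are illustrative; relies on the module's _IOC_* constants):
import ctypes

class example_arg(ctypes.Structure):
    _fields_ = [('value', ctypes.c_uint32)]

read_req = IOR(ord('X'), 1, example_arg)   # kernel fills the struct
write_req = IOW(ord('X'), 2, example_arg)  # kernel reads the struct
print(hex(read_req), hex(write_req))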
|
Get a path including only the trailing `num` directories.
Returns
-------
last_path : str
def _get_last_dirs(path, num=1):
"""Get a path including only the trailing `num` directories.
Returns
-------
last_path : str
"""
head, tail = os.path.split(path)
last_path = str(tail)
for ii in range(num):
head, tail = os.path.split(head)
last_path = os.path.join(tail, last_path)
last_path = "..." + last_path
return last_path
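For example (POSIX-style path, illustrative only):
print(_get_last_dirs('/data/astrocats/supernovae/output/catalog', num=2))
# -> '...supernovae/output/catalog'  (the trailing num+1 path components)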
|
Run the analysis routines determined from the given `args`.
def analyze(self, args):
"""Run the analysis routines determined from the given `args`.
"""
self.log.info("Running catalog analysis")
if args.count:
self.count()
return
|
Analyze counts of catalog tasks and repository files.
Returns
-------
retvals : dict
Dictionary of 'property-name: counts' pairs for further processing
def count(self):
"""Analyze the counts of ...things.
Returns
-------
retvals : dict
Dictionary of 'property-name: counts' pairs for further processing
"""
self.log.info("Running 'count'")
retvals = {}
# Numbers of 'tasks'
num_tasks = self._count_tasks()
retvals['num_tasks'] = num_tasks
# Numbers of 'files'
num_files = self._count_repo_files()
retvals['num_files'] = num_files
return retvals
|
Count the number of tasks, both in the json and directory.
Returns
-------
num_tasks : int
The total number of all tasks included in the `tasks.json` file.
def _count_tasks(self):
"""Count the number of tasks, both in the json and directory.
Returns
-------
num_tasks : int
The total number of all tasks included in the `tasks.json` file.
"""
self.log.warning("Tasks:")
tasks, task_names = self.catalog._load_task_list_from_file()
# Total number of all tasks
num_tasks = len(tasks)
# Number which are active by default
num_tasks_act = len([tt for tt, vv in tasks.items() if vv.active])
# Number of python files in the tasks directory
num_task_files = os.path.join(self.catalog.PATHS.tasks_dir, '*.py')
num_task_files = len(glob(num_task_files))
tasks_str = "{} ({} default active) with {} task-files.".format(
num_tasks, num_tasks_act, num_task_files)
self.log.warning(tasks_str)
return num_tasks
|
Count the number of files in the data repositories.
`_COUNT_FILE_TYPES` are used to determine which file types are checked
explicitly.
`_IGNORE_FILES` determine which files are ignored in (most) counts.
Returns
-------
repo_files : int
Total number of (non-ignored) files in all data repositories.
def _count_repo_files(self):
"""Count the number of files in the data repositories.
`_COUNT_FILE_TYPES` are used to determine which file types are checked
explicitly.
`_IGNORE_FILES` determine which files are ignored in (most) counts.
Returns
-------
repo_files : int
Total number of (non-ignored) files in all data repositories.
"""
self.log.warning("Files:")
num_files = 0
repos = self.catalog.PATHS.get_all_repo_folders()
num_type = np.zeros(len(self._COUNT_FILE_TYPES), dtype=int)
num_ign = 0
for rep in repos:
# Get the last portion of the filepath for this repo
last_path = _get_last_dirs(rep, 2)
# Get counts for different file types
n_all = self._count_files_by_type(rep, '*')
n_type = np.zeros(len(self._COUNT_FILE_TYPES), dtype=int)
for ii, ftype in enumerate(self._COUNT_FILE_TYPES):
n_type[ii] = self._count_files_by_type(rep, '*.' + ftype)
# Get the number of ignored files
# (total including ignore, minus 'all')
n_ign = self._count_files_by_type(rep, '*', ignore=False)
n_ign -= n_all
f_str = self._file_nums_str(n_all, n_type, n_ign)
f_str = "{}: {}".format(last_path, f_str)
self.log.warning(f_str)
# Update cumulative counts
num_files += n_all
num_type += n_type
num_ign += n_ign
f_str = self._file_nums_str(num_files, num_type, num_ign)
self.log.warning(f_str)
return num_files
|
Construct a string showing the number of different file types.
Returns
-------
f_str : str
def _file_nums_str(self, n_all, n_type, n_ign):
"""Construct a string showing the number of different file types.
Returns
-------
f_str : str
"""
# 'other' is the difference between all and named
n_oth = n_all - np.sum(n_type)
f_str = "{} Files".format(n_all) + " ("
if len(n_type):
f_str += ", ".join("{} {}".format(name, num) for name, num in
zip(self._COUNT_FILE_TYPES, n_type))
f_str += ", "
f_str += "other {}; {} ignored)".format(n_oth, n_ign)
return f_str
|
Count files in the given path, with the given pattern.
If `ignore = True`, skip files in the `_IGNORE_FILES` list.
Returns
-------
num_files : int
def _count_files_by_type(self, path, pattern, ignore=True):
"""Count files in the given path, with the given pattern.
If `ignore = True`, skip files in the `_IGNORE_FILES` list.
Returns
-------
num_files : int
"""
# Get all files matching the given path and pattern
files = glob(os.path.join(path, pattern))
# Count the files
files = [ff for ff in files
if os.path.split(ff)[-1] not in self._IGNORE_FILES
or not ignore]
num_files = len(files)
return num_files
|
Given a URL, try to find the ADS bibcode.
Currently only ADS URLs (containing '/abs/') will work.
Returns
-------
code : str or 'None'
The Bibcode if found, otherwise 'None'
def bibcode_from_url(cls, url):
"""Given a URL, try to find the ADS bibcode.
Currently only ADS URLs (containing '/abs/') will work.
Returns
-------
code : str or 'None'
The Bibcode if found, otherwise 'None'
"""
try:
code = url.split('/abs/')
code = code[1].strip()
return code
except:
return None
|
Return the path that this Entry should be saved to.
def _get_save_path(self, bury=False):
"""Return the path that this Entry should be saved to."""
filename = self.get_filename(self[self._KEYS.NAME])
# Put objects that don't belong in this catalog in the boneyard
if bury:
outdir = self.catalog.get_repo_boneyard()
# Get normal repository save directory
else:
repo_folders = self.catalog.PATHS.get_repo_output_folders()
# If no repo folders exist, raise an error -- cannot save
if not len(repo_folders):
err_str = (
"No output data repositories found. Cannot save.\n"
"Make sure that repo names are correctly configured "
"in the `input/repos.json` file, and either manually or "
"automatically (using `astrocats CATALOG git-clone`) "
"clone the appropriate data repositories.")
self.catalog.log.error(err_str)
raise RuntimeError(err_str)
outdir = repo_folders[0]
return outdir, filename
|
Convert the object into a plain OrderedDict.
def _ordered(self, odict):
"""Convert the object into a plain OrderedDict."""
ndict = OrderedDict()
if isinstance(odict, CatDict) or isinstance(odict, Entry):
key = odict.sort_func
else:
key = None
nkeys = list(sorted(odict.keys(), key=key))
for key in nkeys:
if isinstance(odict[key], OrderedDict):
odict[key] = self._ordered(odict[key])
if isinstance(odict[key], list):
if (not (odict[key] and
not isinstance(odict[key][0], OrderedDict))):
nlist = []
for item in odict[key]:
if isinstance(item, OrderedDict):
nlist.append(self._ordered(item))
else:
nlist.append(item)
odict[key] = nlist
ndict[key] = odict[key]
return ndict
|
Return a unique hash associated with the listed keys.
def get_hash(self, keys=[]):
"""Return a unique hash associated with the listed keys."""
if not len(keys):
keys = list(self.keys())
string_rep = ''
oself = self._ordered(deepcopy(self))
for key in keys:
string_rep += json.dumps(oself.get(key, ''), sort_keys=True)
return hashlib.sha512(string_rep.encode()).hexdigest()[:16]
|
Clean quantity value before it is added to entry.
def _clean_quantity(self, quantity):
"""Clean quantity value before it is added to entry."""
value = quantity.get(QUANTITY.VALUE, '').strip()
error = quantity.get(QUANTITY.E_VALUE, '').strip()
unit = quantity.get(QUANTITY.U_VALUE, '').strip()
kind = quantity.get(QUANTITY.KIND, '')
if isinstance(kind, list) and not isinstance(kind, string_types):
kind = [x.strip() for x in kind]
else:
kind = kind.strip()
if not value:
return False
if is_number(value):
value = '%g' % Decimal(value)
if error:
error = '%g' % Decimal(error)
if value:
quantity[QUANTITY.VALUE] = value
if error:
quantity[QUANTITY.E_VALUE] = error
if unit:
quantity[QUANTITY.U_VALUE] = unit
if kind:
quantity[QUANTITY.KIND] = kind
return True
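The `'%g' % Decimal(...)` idiom above normalizes numeric strings to a compact canonical form, e.g.:
from decimal import Decimal

print('%g' % Decimal('0.012300'))  # -> 0.0123
print('%g' % Decimal('1.0e4'))     # -> 10000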
|
Convert `OrderedDict` into `Entry` or its derivative classes.
def _convert_odict_to_classes(self,
data,
clean=False,
merge=True,
pop_schema=True,
compare_to_existing=True,
filter_on={}):
"""Convert `OrderedDict` into `Entry` or its derivative classes."""
self._log.debug("_convert_odict_to_classes(): {}".format(self.name()))
self._log.debug("This should be a temporary fix. Dont be lazy.")
# Setup filters. Currently only used for photometry.
fkeys = list(filter_on.keys())
# Handle 'name'
name_key = self._KEYS.NAME
if name_key in data:
self[name_key] = data.pop(name_key)
# Handle 'schema'
schema_key = self._KEYS.SCHEMA
if schema_key in data:
# Schema should be re-added every execution (done elsewhere) so
# just delete the old entry
if pop_schema:
data.pop(schema_key)
else:
self[schema_key] = data.pop(schema_key)
# Cleanup 'internal' repository stuff
if clean:
# Add data to `self` in ways accommodating 'internal' formats and
# leeway. Removes each added entry from `data` so the remaining
# stuff can be handled normally
data = self.clean_internal(data)
# Handle 'sources'
# ----------------
src_key = self._KEYS.SOURCES
if src_key in data:
# Remove from `data`
sources = data.pop(src_key)
self._log.debug("Found {} '{}' entries".format(
len(sources), src_key))
self._log.debug("{}: {}".format(src_key, sources))
for src in sources:
self.add_source(allow_alias=True, **src)
# Handle `photometry`
# -------------------
photo_key = self._KEYS.PHOTOMETRY
if photo_key in data:
photoms = data.pop(photo_key)
self._log.debug("Found {} '{}' entries".format(
len(photoms), photo_key))
phcount = 0
for photo in photoms:
skip = False
for fkey in fkeys:
if fkey in photo and photo[fkey] not in filter_on[fkey]:
skip = True
if skip:
continue
self._add_cat_dict(
Photometry,
self._KEYS.PHOTOMETRY,
compare_to_existing=compare_to_existing,
**photo)
phcount += 1
self._log.debug("Added {} '{}' entries".format(
phcount, photo_key))
# Handle `spectra`
# ---------------
spec_key = self._KEYS.SPECTRA
if spec_key in data:
# When we are cleaning internal data, we don't always want to
# require all of the normal spectrum data elements.
spectra = data.pop(spec_key)
self._log.debug("Found {} '{}' entries".format(
len(spectra), spec_key))
for spec in spectra:
self._add_cat_dict(
Spectrum,
self._KEYS.SPECTRA,
compare_to_existing=compare_to_existing,
**spec)
# Handle `error`
# --------------
err_key = self._KEYS.ERRORS
if err_key in data:
errors = data.pop(err_key)
self._log.debug("Found {} '{}' entries".format(
len(errors), err_key))
for err in errors:
self._add_cat_dict(Error, self._KEYS.ERRORS, **err)
# Handle `models`
# ---------------
model_key = self._KEYS.MODELS
if model_key in data:
# When we are cleaning internal data, we don't always want to
# require all of the normal model data elements.
model = data.pop(model_key)
self._log.debug("Found {} '{}' entries".format(
len(model), model_key))
for mod in model:
self._add_cat_dict(
Model,
self._KEYS.MODELS,
compare_to_existing=compare_to_existing,
**mod)
# Handle everything else --- should be `Quantity`s
# ------------------------------------------------
if len(data):
self._log.debug("{} remaining entries, assuming `Quantity`".format(
len(data)))
# Iterate over remaining keys
for key in list(data.keys()):
vals = data.pop(key)
# All quantities should be in lists of that quantity
# E.g. `aliases` is a list of alias quantities
if not isinstance(vals, list):
vals = [vals]
self._log.debug("{}: {}".format(key, vals))
for vv in vals:
self._add_cat_dict(
Quantity,
key,
check_for_dupes=merge,
compare_to_existing=compare_to_existing,
**vv)
if merge and self.dupe_of:
self.merge_dupes()
return
|
Check that a source exists and that a quantity isn't erroneous.
def _check_cat_dict_source(self, cat_dict_class, key_in_self, **kwargs):
"""Check that a source exists and that a quantity isn't erroneous."""
# Make sure that a source is given
source = kwargs.get(cat_dict_class._KEYS.SOURCE, None)
if source is None:
raise CatDictError(
"{}: `source` must be provided!".format(self[self._KEYS.NAME]),
warn=True)
# Check that source is a list of integers
for x in source.split(','):
if not is_integer(x):
raise CatDictError(
"{}: `source` is comma-delimited list of "
" integers!".format(self[self._KEYS.NAME]),
warn=True)
# If this source/data is erroneous, skip it
if self.is_erroneous(key_in_self, source):
self._log.info("This source is erroneous, skipping")
return None
# If this source/data is private, skip it
if (self.catalog.args is not None and not self.catalog.args.private and
self.is_private(key_in_self, source)):
self._log.info("This source is private, skipping")
return None
return source
|
Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry.
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
"""Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry.
"""
# Make sure that a source is given, and is valid (nor erroneous)
if cat_dict_class != Error:
try:
source = self._check_cat_dict_source(cat_dict_class,
key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return False
if source is None:
return False
# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
# Compare this new entry with all previous entries to make sure is new
if compare_to_existing and cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
# Return the entry in case we want to use any additional
# tags to augment the old entry
return new_entry
# If this is an alias, add it to the parent catalog's reverse
# dictionary linking aliases to names for fast lookup.
if key_in_self == self._KEYS.ALIAS:
# Check if adding this alias makes us a dupe; if so, mark
# ourselves as a dupe.
if (check_for_dupes and 'aliases' in dir(self.catalog) and
new_entry[QUANTITY.VALUE] in self.catalog.aliases):
possible_dupe = self.catalog.aliases[new_entry[QUANTITY.VALUE]]
# print(possible_dupe)
if (possible_dupe != self[self._KEYS.NAME] and
possible_dupe in self.catalog.entries):
self.dupe_of.append(possible_dupe)
if 'aliases' in dir(self.catalog):
self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[
self._KEYS.NAME]
self.setdefault(key_in_self, []).append(new_entry)
if (key_in_self == self._KEYS.ALIAS and check_for_dupes and
self.dupe_of):
self.merge_dupes()
return True
|
Construct a new `Entry` instance from an input file.
The input file can be given explicitly by `path`, or a path will
be constructed appropriately if possible.
Arguments
---------
catalog : `astrocats.catalog.catalog.Catalog` instance
The parent catalog object to which this entry belongs.
name : str or 'None'
The name of this entry, e.g. `SN1987A` for a `Supernova` entry.
If no `path` is given, a path is constructed by trying to find
a file in one of the 'output' repositories with this `name`.
note: either `name` or `path` must be provided.
path : str or 'None'
The absolute path of the input file.
note: either `name` or `path` must be provided.
clean : bool
Whether special sanitization processing should be done on the input
data. This is mostly for input files from the 'internal'
repositories.
def init_from_file(cls,
catalog,
name=None,
path=None,
clean=False,
merge=True,
pop_schema=True,
ignore_keys=[],
compare_to_existing=True,
try_gzip=False,
filter_on={}):
"""Construct a new `Entry` instance from an input file.
The input file can be given explicitly by `path`, or a path will
be constructed appropriately if possible.
Arguments
---------
catalog : `astrocats.catalog.catalog.Catalog` instance
The parent catalog object to which this entry belongs.
name : str or 'None'
The name of this entry, e.g. `SN1987A` for a `Supernova` entry.
If no `path` is given, a path is constructed by trying to find
a file in one of the 'output' repositories with this `name`.
note: either `name` or `path` must be provided.
path : str or 'None'
The absolute path of the input file.
note: either `name` or `path` must be provided.
clean : bool
Whether special sanitization processing should be done on the input
data. This is mostly for input files from the 'internal'
repositories.
"""
if not catalog:
from astrocats.catalog.catalog import Catalog
log = logging.getLogger()
catalog = Catalog(None, log)
catalog.log.debug("init_from_file()")
if name is None and path is None:
err = ("Either entry `name` or `path` must be specified to load "
"entry.")
catalog.log.error(err)
raise ValueError(err)
# If the path is given, use that to load from
load_path = ''
if path is not None:
load_path = path
name = ''
# If the name is given, try to find a path for it
else:
repo_paths = catalog.PATHS.get_repo_output_folders()
for rep in repo_paths:
filename = cls.get_filename(name)
newpath = os.path.join(rep, filename + '.json')
if os.path.isfile(newpath):
load_path = newpath
break
if load_path is None or not os.path.isfile(load_path):
# FIX: is this warning worthy?
return None
# Create a new `Entry` instance
new_entry = cls(catalog, name)
# Check if .gz file
if try_gzip and not load_path.endswith('.gz'):
try_gzip = False
# Fill it with data from json file
new_entry._load_data_from_json(
load_path,
clean=clean,
merge=merge,
pop_schema=pop_schema,
ignore_keys=ignore_keys,
compare_to_existing=compare_to_existing,
gzip=try_gzip,
filter_on=filter_on)
return new_entry
|
Add an alias, optionally 'cleaning' the alias string.
Calls the parent `catalog` method `clean_entry_name` - to apply the
same name-cleaning as is applied to entry names themselves.
Returns
-------
alias : str
The stored version of the alias (cleaned or not).
def add_alias(self, alias, source, clean=True):
"""Add an alias, optionally 'cleaning' the alias string.
Calls the parent `catalog` method `clean_entry_name` - to apply the
same name-cleaning as is applied to entry names themselves.
Returns
-------
alias : str
The stored version of the alias (cleaned or not).
"""
if clean:
alias = self.catalog.clean_entry_name(alias)
self.add_quantity(self._KEYS.ALIAS, alias, source)
return alias
|
Add an `Error` instance to this entry.
def add_error(self, value, **kwargs):
"""Add an `Error` instance to this entry."""
kwargs.update({ERROR.VALUE: value})
self._add_cat_dict(Error, self._KEYS.ERRORS, **kwargs)
return
|
Add a `Photometry` instance to this entry.
def add_photometry(self, compare_to_existing=True, **kwargs):
"""Add a `Photometry` instance to this entry."""
self._add_cat_dict(
Photometry,
self._KEYS.PHOTOMETRY,
compare_to_existing=compare_to_existing,
**kwargs)
return
|