Merge duplicate entries that correspond to the same event.
def merge_dupes(self):
"""Merge two entries that correspond to the same entry."""
for dupe in self.dupe_of:
if dupe in self.catalog.entries:
if self.catalog.entries[dupe]._stub:
# merge = False to avoid infinite recursion
self.catalog.load_entry_from_name(
dupe, delete=True, merge=False)
self.catalog.copy_entry_to_entry(self.catalog.entries[dupe],
self)
del self.catalog.entries[dupe]
self.dupe_of = []
|
Add a `Quantity` instance to this entry.
def add_quantity(self,
quantities,
value,
source,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
"""Add an `Quantity` instance to this entry."""
success = True
for quantity in listify(quantities):
kwargs.update({QUANTITY.VALUE: value, QUANTITY.SOURCE: source})
cat_dict = self._add_cat_dict(
Quantity,
quantity,
compare_to_existing=compare_to_existing,
check_for_dupes=check_for_dupes,
**kwargs)
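        # `_add_cat_dict` returns True when the quantity was added as new;
        # it returns the constructed `CatDict` when an existing duplicate
        # was found, in which case only the extra tags are merged below.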
if isinstance(cat_dict, CatDict):
self._append_additional_tags(quantity, source, cat_dict)
success = False
return success
|
Add a source that refers to the catalog itself.
For now this points to the Open Supernova Catalog by default.
def add_self_source(self):
"""Add a source that refers to the catalog itself.
For now this points to the Open Supernova Catalog by default.
"""
return self.add_source(
bibcode=self.catalog.OSC_BIBCODE,
name=self.catalog.OSC_NAME,
url=self.catalog.OSC_URL,
secondary=True)
|
Add a `Source` instance to this entry.
def add_source(self, allow_alias=False, **kwargs):
"""Add a `Source` instance to this entry."""
if not allow_alias and SOURCE.ALIAS in kwargs:
err_str = "`{}` passed in kwargs, this shouldn't happen!".format(
SOURCE.ALIAS)
self._log.error(err_str)
raise RuntimeError(err_str)
# Set alias number to be +1 of current number of sources
if SOURCE.ALIAS not in kwargs:
kwargs[SOURCE.ALIAS] = str(self.num_sources() + 1)
source_obj = self._init_cat_dict(Source, self._KEYS.SOURCES, **kwargs)
if source_obj is None:
return None
for item in self.get(self._KEYS.SOURCES, ''):
if source_obj.is_duplicate_of(item):
return item[item._KEYS.ALIAS]
self.setdefault(self._KEYS.SOURCES, []).append(source_obj)
return source_obj[source_obj._KEYS.ALIAS]
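The alias bookkeeping is worth noting: each new source receives the next string-integer alias, and adding an equal source again returns the alias of the existing one instead of appending a duplicate. A minimal standalone sketch of that pattern with plain dicts (the names and bibcode below are dummy values):

def add_source_sketch(sources, **kwargs):
    # Return the alias of an equal existing source instead of re-adding it.
    for existing in sources:
        if {k: v for k, v in existing.items() if k != 'alias'} == kwargs:
            return existing['alias']
    new = dict(kwargs, alias=str(len(sources) + 1))
    sources.append(new)
    return new['alias']

sources = []
add_source_sketch(sources, name='Example Survey')      # -> '1'
add_source_sketch(sources, bibcode='2000A&A...1...1X')  # -> '2'
add_source_sketch(sources, name='Example Survey')      # -> '1' again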
|
Add a `Model` instance to this entry.
def add_model(self, allow_alias=False, **kwargs):
"""Add a `Model` instance to this entry."""
if not allow_alias and MODEL.ALIAS in kwargs:
err_str = "`{}` passed in kwargs, this shouldn't happen!".format(
            MODEL.ALIAS)
self._log.error(err_str)
raise RuntimeError(err_str)
# Set alias number to be +1 of current number of models
if MODEL.ALIAS not in kwargs:
kwargs[MODEL.ALIAS] = str(self.num_models() + 1)
model_obj = self._init_cat_dict(Model, self._KEYS.MODELS, **kwargs)
if model_obj is None:
return None
for item in self.get(self._KEYS.MODELS, ''):
if model_obj.is_duplicate_of(item):
return item[item._KEYS.ALIAS]
self.setdefault(self._KEYS.MODELS, []).append(model_obj)
return model_obj[model_obj._KEYS.ALIAS]
|
Add a `Spectrum` instance to this entry.
def add_spectrum(self, compare_to_existing=True, **kwargs):
"""Add a `Spectrum` instance to this entry."""
spec_key = self._KEYS.SPECTRA
    # Make sure that a source is given and is valid (not erroneous)
source = self._check_cat_dict_source(Spectrum, spec_key, **kwargs)
if source is None:
return None
# Try to create a new instance of `Spectrum`
new_spectrum = self._init_cat_dict(Spectrum, spec_key, **kwargs)
if new_spectrum is None:
return None
is_dupe = False
for item in self.get(spec_key, []):
# Only the `filename` should be compared for duplicates. If a
# duplicate is found, that means the previous `exclude` array
# should be saved to the new object, and the old deleted
if new_spectrum.is_duplicate_of(item):
if SPECTRUM.EXCLUDE in new_spectrum:
item[SPECTRUM.EXCLUDE] = new_spectrum[SPECTRUM.EXCLUDE]
elif SPECTRUM.EXCLUDE in item:
item.update(new_spectrum)
is_dupe = True
break
if not is_dupe:
self.setdefault(spec_key, []).append(new_spectrum)
return
|
Check that the entry has the required fields.
def check(self):
"""Check that the entry has the required fields."""
# Make sure there is a schema key in dict
if self._KEYS.SCHEMA not in self:
self[self._KEYS.SCHEMA] = self.catalog.SCHEMA.URL
# Make sure there is a name key in dict
if (self._KEYS.NAME not in self or len(self[self._KEYS.NAME]) == 0):
raise ValueError("Entry name is empty:\n\t{}".format(
json.dumps(
self, indent=2)))
return
|
Retrieve the aliases of this object as a list of strings.
Arguments
---------
includename : bool
Include the 'name' parameter in the list of aliases.
def get_aliases(self, includename=True):
"""Retrieve the aliases of this object as a list of strings.
Arguments
---------
includename : bool
Include the 'name' parameter in the list of aliases.
"""
    # empty list if it doesn't exist
alias_quanta = self.get(self._KEYS.ALIAS, [])
aliases = [aq[QUANTITY.VALUE] for aq in alias_quanta]
if includename and self[self._KEYS.NAME] not in aliases:
aliases = [self[self._KEYS.NAME]] + aliases
return aliases
|
Retrieve the raw text from a file.
def get_entry_text(self, fname):
"""Retrieve the raw text from a file."""
if fname.split('.')[-1] == 'gz':
with gz.open(fname, 'rt') as f:
filetext = f.read()
else:
with codecs.open(fname, 'r') as f:
filetext = f.read()
return filetext
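Assuming `gz` and `codecs` are the standard `gzip` and `codecs` modules (as the code suggests) and `entry` is an `Entry` instance, the same call covers plain and compressed files; the file name below is hypothetical:

import gzip as gz

with gz.open('demo_entry.json.gz', 'wt') as f:  # write a small test file
    f.write('{"demo": {}}')
text = entry.get_entry_text('demo_entry.json.gz')  # decompressed transparently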
|
Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias.
def get_source_by_alias(self, alias):
"""Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias.
"""
for source in self.get(self._KEYS.SOURCES, []):
if source[self._KEYS.ALIAS] == alias:
return source
raise ValueError("Source '{}': alias '{}' not found!".format(self[
self._KEYS.NAME], alias))
|
Get a new `Entry` which contains the 'stub' of this one.
The 'stub' contains only the name, aliases, and a few basic fields.
Usage:
-----
To convert a normal entry into a stub (for example), overwrite the
entry in place, i.e.
>>> entries[name] = entries[name].get_stub()
Returns
-------
stub : `astrocats.catalog.entry.Entry` subclass object
The type of the returned object is this instance's type.
def get_stub(self):
"""Get a new `Entry` which contains the 'stub' of this one.
    The 'stub' contains only the name, aliases, and a few basic fields.
Usage:
-----
To convert a normal entry into a stub (for example), overwrite the
entry in place, i.e.
>>> entries[name] = entries[name].get_stub()
Returns
-------
stub : `astrocats.catalog.entry.Entry` subclass object
The type of the returned object is this instance's type.
"""
stub = type(self)(self.catalog, self[self._KEYS.NAME], stub=True)
if self._KEYS.ALIAS in self:
stub[self._KEYS.ALIAS] = self[self._KEYS.ALIAS]
if self._KEYS.DISTINCT_FROM in self:
stub[self._KEYS.DISTINCT_FROM] = self[self._KEYS.DISTINCT_FROM]
if self._KEYS.RA in self:
stub[self._KEYS.RA] = self[self._KEYS.RA]
if self._KEYS.DEC in self:
stub[self._KEYS.DEC] = self[self._KEYS.DEC]
if self._KEYS.DISCOVER_DATE in self:
stub[self._KEYS.DISCOVER_DATE] = self[self._KEYS.DISCOVER_DATE]
if self._KEYS.SOURCES in self:
stub[self._KEYS.SOURCES] = self[self._KEYS.SOURCES]
return stub
|
Check if attribute has been marked as being erroneous.
def is_erroneous(self, field, sources):
"""Check if attribute has been marked as being erroneous."""
if self._KEYS.ERRORS in self:
my_errors = self[self._KEYS.ERRORS]
for alias in sources.split(','):
source = self.get_source_by_alias(alias)
bib_err_values = [
err[ERROR.VALUE] for err in my_errors
if err[ERROR.KIND] == SOURCE.BIBCODE and
err[ERROR.EXTRA] == field
]
if (SOURCE.BIBCODE in source and
source[SOURCE.BIBCODE] in bib_err_values):
return True
name_err_values = [
err[ERROR.VALUE] for err in my_errors
if err[ERROR.KIND] == SOURCE.NAME and err[ERROR.EXTRA] ==
field
]
if (SOURCE.NAME in source and
source[SOURCE.NAME] in name_err_values):
return True
return False
|
Check if attribute is private.
def is_private(self, key, sources):
"""Check if attribute is private."""
# aliases are always public.
if key == ENTRY.ALIAS:
return False
return all([
SOURCE.PRIVATE in self.get_source_by_alias(x)
for x in sources.split(',')
])
|
Sanitize the data (sort it, etc.) before writing it to disk.
Template method that can be overridden in each catalog's subclassed
`Entry` object.
def sanitize(self):
"""Sanitize the data (sort it, etc.) before writing it to disk.
Template method that can be overridden in each catalog's subclassed
`Entry` object.
"""
name = self[self._KEYS.NAME]
aliases = self.get_aliases(includename=False)
if name not in aliases:
        # Assign the first source to the alias; if none is available, add ourselves as the source.
if self._KEYS.SOURCES in self:
self.add_quantity(self._KEYS.ALIAS, name, '1')
if self._KEYS.ALIAS not in self:
source = self.add_self_source()
self.add_quantity(self._KEYS.ALIAS, name, source)
else:
source = self.add_self_source()
self.add_quantity(self._KEYS.ALIAS, name, source)
if self._KEYS.ALIAS in self:
self[self._KEYS.ALIAS].sort(
key=lambda key: alias_priority(name, key[QUANTITY.VALUE]))
else:
self._log.error(
'There should be at least one alias for `{}`.'.format(name))
if self._KEYS.PHOTOMETRY in self:
self[self._KEYS.PHOTOMETRY].sort(
key=lambda x: ((float(x[PHOTOMETRY.TIME]) if
isinstance(x[PHOTOMETRY.TIME],
(basestring, float, int))
else min([float(y) for y in
x[PHOTOMETRY.TIME]])) if
PHOTOMETRY.TIME in x else 0.0,
x[PHOTOMETRY.BAND] if PHOTOMETRY.BAND in
x else '',
                           float(x[PHOTOMETRY.MAGNITUDE]) if
                           PHOTOMETRY.MAGNITUDE in x else 0.0))
if (self._KEYS.SPECTRA in self and list(
filter(None, [
SPECTRUM.TIME in x for x in self[self._KEYS.SPECTRA]
]))):
self[self._KEYS.SPECTRA].sort(
key=lambda x: (float(x[SPECTRUM.TIME]) if
SPECTRUM.TIME in x else 0.0,
x[SPECTRUM.FILENAME] if
SPECTRUM.FILENAME in x else '')
)
if self._KEYS.SOURCES in self:
# Remove orphan sources
source_aliases = [
x[SOURCE.ALIAS] for x in self[self._KEYS.SOURCES]
]
# Sources with the `PRIVATE` attribute are always retained
source_list = [
x[SOURCE.ALIAS] for x in self[self._KEYS.SOURCES]
if SOURCE.PRIVATE in x
]
for key in self:
# if self._KEYS.get_key_by_name(key).no_source:
if (key in [
self._KEYS.NAME, self._KEYS.SCHEMA, self._KEYS.SOURCES,
self._KEYS.ERRORS
]):
continue
for item in self[key]:
source_list += item[item._KEYS.SOURCE].split(',')
new_src_list = sorted(
list(set(source_aliases).intersection(source_list)))
new_sources = []
for source in self[self._KEYS.SOURCES]:
if source[SOURCE.ALIAS] in new_src_list:
new_sources.append(source)
else:
self._log.info('Removing orphaned source from `{}`.'
.format(name))
        if not new_sources:
            del self[self._KEYS.SOURCES]
        else:
            self[self._KEYS.SOURCES] = new_sources
|
Write entry to JSON file in the proper location.
Arguments
---------
bury : bool
final : bool
If this is the 'final' save, perform additional sanitization and
cleaning operations.
def save(self, bury=False, final=False):
"""Write entry to JSON file in the proper location.
Arguments
---------
bury : bool
final : bool
If this is the 'final' save, perform additional sanitization and
cleaning operations.
"""
outdir, filename = self._get_save_path(bury=bury)
if final:
self.sanitize()
# FIX: use 'dump' not 'dumps'
jsonstring = json.dumps(
{
self[self._KEYS.NAME]: self._ordered(self)
},
indent='\t' if sys.version_info[0] >= 3 else 4,
separators=(',', ':'),
ensure_ascii=False)
if not os.path.isdir(outdir):
raise RuntimeError("Output directory '{}' for event '{}' does "
"not exist.".format(outdir, self[
self._KEYS.NAME]))
save_name = os.path.join(outdir, filename + '.json')
with codecs.open(save_name, 'w', encoding='utf8') as sf:
sf.write(jsonstring)
if not os.path.exists(save_name):
raise RuntimeError("File '{}' was not saved!".format(save_name))
return save_name
|
Used to sort keys when writing Entry to JSON format.
Should be supplemented/overridden by inheriting classes.
def sort_func(self, key):
"""Used to sort keys when writing Entry to JSON format.
Should be supplemented/overridden by inheriting classes.
"""
if key == self._KEYS.SCHEMA:
return 'aaa'
if key == self._KEYS.NAME:
return 'aab'
if key == self._KEYS.SOURCES:
return 'aac'
if key == self._KEYS.ALIAS:
return 'aad'
if key == self._KEYS.MODELS:
return 'aae'
if key == self._KEYS.PHOTOMETRY:
return 'zzy'
if key == self._KEYS.SPECTRA:
return 'zzz'
return key
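The sentinel strings rely on plain lexicographic ordering: 'aaa' through 'aae' pull the fixed keys to the front, 'zzy'/'zzz' push photometry and spectra to the back, and every other key sorts by its own name. A standalone sketch of the effect (a module-level rewrite of the method, with a dict in place of `self._KEYS`):

def sort_func(key):
    order = {'schema': 'aaa', 'name': 'aab', 'sources': 'aac',
             'alias': 'aad', 'models': 'aae',
             'photometry': 'zzy', 'spectra': 'zzz'}
    return order.get(key, key)

keys = ['photometry', 'redshift', 'name', 'sources', 'alias', 'schema']
print(sorted(keys, key=sort_func))
# ['schema', 'name', 'sources', 'alias', 'redshift', 'photometry']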
|
Set photometry dictionary from a counts measurement.
def set_pd_mag_from_counts(photodict,
c='',
ec='',
lec='',
uec='',
zp=DEFAULT_ZP,
sig=DEFAULT_UL_SIGMA):
"""Set photometry dictionary from a counts measurement."""
with localcontext() as ctx:
if lec == '' or uec == '':
lec = ec
uec = ec
prec = max(
get_sig_digits(str(c), strip_zeroes=False),
get_sig_digits(str(lec), strip_zeroes=False),
get_sig_digits(str(uec), strip_zeroes=False)) + 1
ctx.prec = prec
dlec = Decimal(str(lec))
duec = Decimal(str(uec))
if c != '':
dc = Decimal(str(c))
dzp = Decimal(str(zp))
dsig = Decimal(str(sig))
photodict[PHOTOMETRY.ZERO_POINT] = str(zp)
if c == '' or float(c) < float(sig) * float(uec):
photodict[PHOTOMETRY.UPPER_LIMIT] = True
photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig)
photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - (D25 * (dsig * duec
).log10()))
dnec = Decimal('10.0') ** (
(dzp - Decimal(photodict[PHOTOMETRY.MAGNITUDE])) / D25)
photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
(dnec + duec).log10() - dnec.log10()))
else:
photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - D25 * dc.log10())
photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
(dc + duec).log10() - dc.log10()))
photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * (
dc.log10() - (dc - dlec).log10()))
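The conversion behind this is the Pogson relation m = zp - 2.5 log10(counts), with the upper-limit branch substituting sig × error for the counts (`D25` is presumably Decimal('2.5')). A plain-float sketch of the two branches:

import math

zp, sig = 30.0, 3.0    # hypothetical zero point and upper-limit sigma
c, uec = 1000.0, 50.0  # counts and upper count error

if c < sig * uec:  # upper limit: not the case for these numbers
    mag = zp - 2.5 * math.log10(sig * uec)
else:              # detection: 30 - 2.5 * 3 = 22.5
    mag = zp - 2.5 * math.log10(c)
    e_upper = 2.5 * (math.log10(c + uec) - math.log10(c))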
|
Set photometry dictionary from a flux density measurement.
`fd` is assumed to be in microjanskys.
def set_pd_mag_from_flux_density(photodict,
fd='',
efd='',
lefd='',
uefd='',
sig=DEFAULT_UL_SIGMA):
"""Set photometry dictionary from a flux density measurement.
`fd` is assumed to be in microjanskys.
"""
with localcontext() as ctx:
if lefd == '' or uefd == '':
lefd = efd
uefd = efd
prec = max(
get_sig_digits(str(fd), strip_zeroes=False),
get_sig_digits(str(lefd), strip_zeroes=False),
get_sig_digits(str(uefd), strip_zeroes=False)) + 1
ctx.prec = prec
dlefd = Decimal(str(lefd))
duefd = Decimal(str(uefd))
if fd != '':
dfd = Decimal(str(fd))
dsig = Decimal(str(sig))
if fd == '' or float(fd) < DEFAULT_UL_SIGMA * float(uefd):
photodict[PHOTOMETRY.UPPER_LIMIT] = True
photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig)
photodict[PHOTOMETRY.MAGNITUDE] = str(Decimal('23.9') - D25 * (
dsig * duefd).log10())
if fd:
photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
(dfd + duefd).log10() - dfd.log10()))
else:
photodict[PHOTOMETRY.MAGNITUDE] = str(Decimal('23.9') - D25 *
dfd.log10())
photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
(dfd + duefd).log10() - dfd.log10()))
photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * (
dfd.log10() - (dfd - dlefd).log10()))
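The implicit zero point 23.9 is the AB magnitude of a 1 µJy source, so the conversion is m = 23.9 - 2.5 log10(fd/µJy); for example:

import math

fd = 10.0                          # flux density in microjanskys
mag = 23.9 - 2.5 * math.log10(fd)  # = 21.4 (AB)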
|
Check that entry attributes are legal.
def _check(self):
"""Check that entry attributes are legal."""
# Run the super method
super(Photometry, self)._check()
err_str = None
has_flux = self._KEYS.FLUX in self
has_flux_dens = self._KEYS.FLUX_DENSITY in self
has_u_flux = self._KEYS.U_FLUX in self
has_u_flux_dens = self._KEYS.U_FLUX_DENSITY in self
has_freq = self._KEYS.FREQUENCY in self
has_band = self._KEYS.BAND in self
has_ener = self._KEYS.ENERGY in self
has_u_freq = self._KEYS.U_FREQUENCY in self
has_u_ener = self._KEYS.U_ENERGY in self
if has_flux or has_flux_dens:
if not any([has_freq, has_band, has_ener]):
err_str = ("Has `{}` or `{}`".format(self._KEYS.FLUX,
self._KEYS.FLUX_DENSITY) +
" but None of `{}`, `{}`, `{}`".format(
self._KEYS.FREQUENCY, self._KEYS.BAND,
self._KEYS.ENERGY))
elif has_flux and not has_u_flux:
err_str = "`{}` provided without `{}`.".format(
self._KEYS.FLUX, self._KEYS.U_FLUX)
elif has_flux_dens and not has_u_flux_dens:
err_str = "`{}` provided without `{}`.".format(
self._KEYS.FLUX_DENSITY, self._KEYS.U_FLUX_DENSITY)
elif has_freq and not has_u_freq:
err_str = "`{}` provided without `{}`.".format(
self._KEYS.FREQUENCY, self._KEYS.U_FREQUENCY)
elif has_ener and not has_u_ener:
err_str = "`{}` provided without `{}`.".format(
self._KEYS.ENERGY, self._KEYS.U_ENERGY)
if err_str is not None:
raise ValueError(err_str)
return
|
Specify order for attributes.
def sort_func(self, key):
"""Specify order for attributes."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.MODEL:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
return key
|
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = get_url()
context.configure(
url=url,
version_table="alembic_ziggurat_foundations_version",
transaction_per_migration=True,
)
with context.begin_transaction():
context.run_migrations()
|
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(get_url())
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
version_table="alembic_ziggurat_foundations_version",
transaction_per_migration=True,
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
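`get_url` is not shown in this snippet; a typical implementation (an assumption, not part of the original) reads the database URL from an environment variable and falls back to alembic.ini:

import os
from alembic import context

def get_url():
    # Hypothetical helper: env var first, then alembic.ini's sqlalchemy.url.
    return os.environ.get(
        "DATABASE_URL",
        context.config.get_main_option("sqlalchemy.url"))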
|
Return the first instance matching user id, permission name and resource id.
:param user_id:
:param perm_name:
:param resource_id:
:param db_session:
:return:
def by_resource_user_and_perm(
cls, user_id, perm_name, resource_id, db_session=None
):
"""
    Return the first instance matching user id, permission name and resource id.
:param user_id:
:param perm_name:
:param resource_id:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.user_id == user_id)
query = query.filter(cls.model.resource_id == resource_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
|
Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
def tdSensor(self):
"""Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
"""
protocol = create_string_buffer(20)
model = create_string_buffer(20)
sid = c_int()
datatypes = c_int()
self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model),
byref(sid), byref(datatypes))
return {'protocol': self._to_str(protocol),
'model': self._to_str(model),
'id': sid.value, 'datatypes': datatypes.value}
|
Get the sensor value for a given sensor.
:return: a dict with the keys: value, timestamp.
def tdSensorValue(self, protocol, model, sid, datatype):
"""Get the sensor value for a given sensor.
:return: a dict with the keys: value, timestamp.
"""
value = create_string_buffer(20)
timestamp = c_int()
self._lib.tdSensorValue(protocol, model, sid, datatype,
value, sizeof(value), byref(timestamp))
return {'value': self._to_str(value), 'timestamp': timestamp.value}
|
Get the next controller while iterating.
:return: a dict with the keys: id, type, name, available.
def tdController(self):
"""Get the next controller while iterating.
:return: a dict with the keys: id, type, name, available.
"""
cid = c_int()
ctype = c_int()
name = create_string_buffer(255)
available = c_int()
self._lib.tdController(byref(cid), byref(ctype), name, sizeof(name),
byref(available))
return {'id': cid.value, 'type': ctype.value,
'name': self._to_str(name), 'available': available.value}
|
schemes contains a list of password hashing schemes to support;
replace it with the hash(es) you wish to support.
This example sets pbkdf2_sha256 as the default,
with support for legacy bcrypt hashes.
:param schemes:
:return: CryptContext()
def make_passwordmanager(schemes=None):
"""
    schemes contains a list of password hashing schemes to support;
    replace it with the hash(es) you wish to support.
    This example sets pbkdf2_sha256 as the default,
    with support for legacy bcrypt hashes.
:param schemes:
:return: CryptContext()
"""
from passlib.context import CryptContext
if not schemes:
schemes = ["pbkdf2_sha256", "bcrypt"]
pwd_context = CryptContext(schemes=schemes, deprecated="auto")
return pwd_context
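Usage follows the ordinary passlib flow: hash on write, verify on login; legacy bcrypt hashes still verify and can be flagged for re-hashing:

pwd_context = make_passwordmanager()
h = pwd_context.hash("s3cret")          # pbkdf2_sha256 by default
assert pwd_context.verify("s3cret", h)
pwd_context.needs_update(h)             # False for the current scheme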
|
This function handles attaching models to services if a model has one
specified as `_ziggurat_service`. It also attaches a proxy object holding
all model definitions that services might use.
:param args:
:param kwargs:
:param passwordmanager: the password manager to override the default one
:param passwordmanager_schemes: list of schemes for the default
    passwordmanager to use
:return:
def ziggurat_model_init(
user=None,
group=None,
user_group=None,
group_permission=None,
user_permission=None,
user_resource_permission=None,
group_resource_permission=None,
resource=None,
external_identity=None,
*args,
**kwargs
):
"""
    This function handles attaching models to services if a model has one
    specified as `_ziggurat_service`. It also attaches a proxy object holding
    all model definitions that services might use.
    :param args:
    :param kwargs:
    :param passwordmanager: the password manager to override the default one
    :param passwordmanager_schemes: list of schemes for the default
        passwordmanager to use
    :return:
"""
models = ModelProxy()
models.User = user
models.Group = group
models.UserGroup = user_group
models.GroupPermission = group_permission
models.UserPermission = user_permission
models.UserResourcePermission = user_resource_permission
models.GroupResourcePermission = group_resource_permission
models.Resource = resource
models.ExternalIdentity = external_identity
model_service_mapping = import_model_service_mappings()
if kwargs.get("passwordmanager"):
user.passwordmanager = kwargs["passwordmanager"]
else:
user.passwordmanager = make_passwordmanager(
kwargs.get("passwordmanager_schemes")
)
for name, cls in models.items():
        # if the model has a manager attached, attach the class to the manager too
services = model_service_mapping.get(name, [])
for service in services:
setattr(service, "model", cls)
setattr(service, "models_proxy", models)
|
Show messages for the given query or day.
def messages(request, year=None, month=None, day=None,
template="gnotty/messages.html"):
"""
Show messages for the given query or day.
"""
query = request.REQUEST.get("q")
prev_url, next_url = None, None
messages = IRCMessage.objects.all()
if hide_joins_and_leaves(request):
messages = messages.filter(join_or_leave=False)
if query:
search = Q(message__icontains=query) | Q(nickname__icontains=query)
messages = messages.filter(search).order_by("-message_time")
elif year and month and day:
messages = messages.filter(message_time__year=year,
message_time__month=month,
message_time__day=day)
day_delta = timedelta(days=1)
this_date = date(int(year), int(month), int(day))
prev_date = this_date - day_delta
next_date = this_date + day_delta
prev_url = reverse("gnotty_day", args=prev_date.timetuple()[:3])
next_url = reverse("gnotty_day", args=next_date.timetuple()[:3])
else:
return redirect("gnotty_year", year=datetime.now().year)
context = dict(settings)
context["messages"] = messages
context["prev_url"] = prev_url
context["next_url"] = next_url
return render(request, template, context)
|
Show calendar months for the given year/month.
def calendar(request, year=None, month=None, template="gnotty/calendar.html"):
"""
Show calendar months for the given year/month.
"""
try:
year = int(year)
except TypeError:
year = datetime.now().year
lookup = {"message_time__year": year}
if month:
lookup["message_time__month"] = month
if hide_joins_and_leaves(request):
lookup["join_or_leave"] = False
messages = IRCMessage.objects.filter(**lookup)
try:
dates = messages.datetimes("message_time", "day")
except AttributeError:
dates = messages.dates("message_time", "day")
days = [d.date() for d in dates]
months = []
if days:
min_date, max_date = days[0], days[-1]
days = set(days)
calendar = Calendar(SUNDAY)
for m in range(1, 13) if not month else [int(month)]:
lt_max = m <= max_date.month or year < max_date.year
gt_min = m >= min_date.month or year > min_date.year
if lt_max and gt_min:
weeks = calendar.monthdatescalendar(year, m)
for w, week in enumerate(weeks):
for d, day in enumerate(week):
weeks[w][d] = {
"date": day,
"in_month": day.month == m,
"has_messages": day in days,
}
months.append({"month": date(year, m, 1), "weeks": weeks})
context = dict(settings)
context["months"] = months
return render(request, template, context)
|
A helper for decorating :class:`bravado.client.SwaggerClient`.
:class:`bravado.client.SwaggerClient` can be extended by creating a class
which wraps all calls to it. This helper is used in a :func:`__getattr__`
to check if the attr exists on the api_client. If the attr does not exist
raise :class:`AttributeError`, if it exists and is not callable return it,
and if it is callable return a partial function calling `func` with `name`.
Example usage:
.. code-block:: python
class SomeClientDecorator(object):
def __init__(self, api_client, ...):
self.api_client = api_client
# First arg should be sufficiently unique to not conflict with any of
# the kwargs
def wrap_call(self, client_call_name, *args, **kwargs):
...
def __getattr__(self, name):
return decorate_client(self.api_client, self.wrap_call, name)
:param api_client: the client which is being decorated
:type api_client: :class:`bravado.client.SwaggerClient`
:param func: a callable which accepts `name`, `*args`, `**kwargs`
:type func: callable
:param name: the attribute being accessed
:type name: string
:returns: the attribute from the `api_client` or a partial of `func`
:raises: :class:`AttributeError`
def decorate_client(api_client, func, name):
"""A helper for decorating :class:`bravado.client.SwaggerClient`.
:class:`bravado.client.SwaggerClient` can be extended by creating a class
which wraps all calls to it. This helper is used in a :func:`__getattr__`
to check if the attr exists on the api_client. If the attr does not exist
raise :class:`AttributeError`, if it exists and is not callable return it,
and if it is callable return a partial function calling `func` with `name`.
Example usage:
.. code-block:: python
class SomeClientDecorator(object):
def __init__(self, api_client, ...):
self.api_client = api_client
    # First arg should be sufficiently unique to not conflict with any of
# the kwargs
def wrap_call(self, client_call_name, *args, **kwargs):
...
def __getattr__(self, name):
return decorate_client(self.api_client, self.wrap_call, name)
:param api_client: the client which is being decorated
:type api_client: :class:`bravado.client.SwaggerClient`
:param func: a callable which accepts `name`, `*args`, `**kwargs`
:type func: callable
:param name: the attribute being accessed
:type name: string
:returns: the attribute from the `api_client` or a partial of `func`
:raises: :class:`AttributeError`
"""
client_attr = getattr(api_client, name)
if not callable(client_attr):
return client_attr
return OperationDecorator(client_attr, functools.partial(func, name))
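A minimal self-contained illustration of the pattern with a dummy client (here a plain `functools.partial` stands in for `OperationDecorator`):

import functools

class DummyClient(object):
    version = '1.0'
    def get_pet(self, pet_id):
        return {'id': pet_id}

class LoggingDecorator(object):
    def __init__(self, api_client):
        self.api_client = api_client
    def wrap_call(self, name, *args, **kwargs):
        print('calling %s' % name)
        return getattr(self.api_client, name)(*args, **kwargs)
    def __getattr__(self, name):
        attr = getattr(self.api_client, name)  # AttributeError propagates
        if not callable(attr):
            return attr  # plain attributes pass through untouched
        return functools.partial(self.wrap_call, name)

client = LoggingDecorator(DummyClient())
client.version      # -> '1.0', returned as-is
client.get_pet(7)   # prints 'calling get_pet', returns {'id': 7}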
|
Deletes all expired mutex locks if a ttl is provided.
def delete_expired_locks(self):
"""
Deletes all expired mutex locks if a ttl is provided.
"""
ttl_seconds = self.get_mutex_ttl_seconds()
if ttl_seconds is not None:
DBMutex.objects.filter(creation_time__lte=timezone.now() - timedelta(seconds=ttl_seconds)).delete()
|
Acquires the db mutex lock. Takes the necessary steps to delete any stale locks.
Throws a DBMutexError if it can't acquire the lock.
def start(self):
"""
Acquires the db mutex lock. Takes the necessary steps to delete any stale locks.
Throws a DBMutexError if it can't acquire the lock.
"""
# Delete any expired locks first
self.delete_expired_locks()
try:
with transaction.atomic():
self.lock = DBMutex.objects.create(lock_id=self.lock_id)
except IntegrityError:
raise DBMutexError('Could not acquire lock: {0}'.format(self.lock_id))
|
Releases the db mutex lock. Throws an error if the lock was released before the function finished.
def stop(self):
"""
Releases the db mutex lock. Throws an error if the lock was released before the function finished.
"""
if not DBMutex.objects.filter(id=self.lock.id).exists():
raise DBMutexTimeoutError('Lock {0} expired before function completed'.format(self.lock_id))
else:
self.lock.delete()
|
Decorates a function with the db_mutex decorator by using this class as a context manager around
it.
def decorate_callable(self, func):
"""
Decorates a function with the db_mutex decorator by using this class as a context manager around
it.
"""
def wrapper(*args, **kwargs):
try:
with self:
result = func(*args, **kwargs)
return result
except DBMutexError as e:
if self.suppress_acquisition_exceptions:
LOG.error(e)
else:
raise e
functools.update_wrapper(wrapper, func)
return wrapper
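A standalone sketch of the same decorate-via-context-manager pattern, with a dummy guard in place of the db mutex:

import functools

class Guard(object):
    def __enter__(self):
        print('acquire')
        return self
    def __exit__(self, *exc):
        print('release')
        return False
    def decorate_callable(self, func):
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        functools.update_wrapper(wrapper, func)
        return wrapper

guard = Guard()

@guard.decorate_callable
def work(x):
    return x * 2

work(3)  # prints 'acquire' then 'release', returns 6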
|
Default groupfinder implementation for pyramid applications
:param userid:
:param request:
:return:
def groupfinder(userid, request):
"""
    Default groupfinder implementation for pyramid applications
:param userid:
:param request:
:return:
"""
if userid and hasattr(request, "user") and request.user:
groups = ["group:%s" % g.id for g in request.user.groups]
return groups
return []
|
Position nodes using ForceAtlas2 force-directed algorithm
Parameters
----------
graph: NetworkX graph
A position will be assigned to every node in G.
pos_list : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a coordinate list or tuple. If None, then use
random initial positions.
node_masses : dict or None optional (default=None)
Predefined masses for nodes with node as keys and masses as values.
If None, then use degree of nodes.
iterations : int optional (default=100)
Number of iterations
outbound_attraction_distribution : boolean
Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the borders.
This mode is meant to grant authorities (nodes with a high indegree) a more central position than hubs (nodes with a high outdegree).
This is useful for social networks and web networks, where authorities are sometimes considered more important than hubs
lin_log_mode: boolean
Switch ForceAtlas model from lin-lin to lin-log (tribute to Andreas Noack). Makes clusters more tight
prevent_overlapping: boolean
With this mode enabled, the repulsion is modified so that the nodes do not overlap.
The goal is to produce a more readable and aesthetically pleasing image.
edge_weight_influence: float
How much influence you give to the edges weight. 0 is “no influence” and 1 is “normal”.
jitter_tolerance: float
How much swinging you allow. Above 1 discouraged. Lower gives less speed and more precision
barnes_hut_optimize: boolean
Barnes Hut optimization: n² complexity to n.ln(n) ; allows larger graphs.
barnes_hut_theta: float
Theta of the Barnes Hut optimization
scaling_ratio: float
How much repulsion you want. More makes a more sparse graph.
strong_gravity_mode: boolean
The “Strong gravity” option sets a force that attracts the nodes that are distant from the center more (d is this distance).
This force has the drawback of being so strong that it is sometimes stronger than the other forces.
It may result in a biased placement of the nodes.
However, its advantage is to force a very compact layout, which may be useful for certain purposes.
multithread: boolean
gravity: float
Attracts nodes to the center. Prevents islands from drifting away.
Returns
-------
pos : dict
A dictionary of positions keyed by node
def force_atlas2_layout(graph,
pos_list=None,
node_masses=None,
iterations=100,
outbound_attraction_distribution=False,
lin_log_mode=False,
prevent_overlapping=False,
edge_weight_influence=1.0,
jitter_tolerance=1.0,
barnes_hut_optimize=False,
barnes_hut_theta=1.2,
scaling_ratio=2.0,
strong_gravity_mode=False,
multithread=False,
gravity=1.0):
"""
Position nodes using ForceAtlas2 force-directed algorithm
Parameters
----------
graph: NetworkX graph
A position will be assigned to every node in G.
pos_list : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a coordinate list or tuple. If None, then use
random initial positions.
node_masses : dict or None optional (default=None)
Predefined masses for nodes with node as keys and masses as values.
If None, then use degree of nodes.
    iterations : int optional (default=100)
Number of iterations
outbound_attraction_distribution : boolean
Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the borders.
This mode is meant to grant authorities (nodes with a high indegree) a more central position than hubs (nodes with a high outdegree).
This is useful for social networks and web networks, where authorities are sometimes considered more important than hubs
lin_log_mode: boolean
Switch ForceAtlas model from lin-lin to lin-log (tribute to Andreas Noack). Makes clusters more tight
prevent_overlapping: boolean
With this mode enabled, the repulsion is modified so that the nodes do not overlap.
The goal is to produce a more readable and aesthetically pleasing image.
edge_weight_influence: float
How much influence you give to the edges weight. 0 is “no influence” and 1 is “normal”.
jitter_tolerance: float
How much swinging you allow. Above 1 discouraged. Lower gives less speed and more precision
barnes_hut_optimize: boolean
Barnes Hut optimization: n² complexity to n.ln(n) ; allows larger graphs.
barnes_hut_theta: float
Theta of the Barnes Hut optimization
scaling_ratio: float
How much repulsion you want. More makes a more sparse graph.
strong_gravity_mode: boolean
        The “Strong gravity” option sets a force that attracts the nodes that are distant from the center more (d is this distance).
This force has the drawback of being so strong that it is sometimes stronger than the other forces.
It may result in a biased placement of the nodes.
However, its advantage is to force a very compact layout, which may be useful for certain purposes.
multithread: boolean
gravity: float
Attracts nodes to the center. Prevents islands from drifting away.
Returns
-------
pos : dict
A dictionary of positions keyed by node
"""
assert isinstance(graph, networkx.classes.graph.Graph), "Not a networkx graph"
assert isinstance(pos_list, dict) or (pos_list is None), "pos must be specified as a dictionary, as in networkx"
assert multithread is False, "Not implemented yet"
G = numpy.asarray(networkx.to_numpy_matrix(graph))
pos = None
if pos_list is not None:
pos = numpy.asarray([pos_list[i] for i in graph.nodes()])
masses = None
if node_masses is not None:
masses = numpy.asarray([node_masses[node] for node in graph.nodes()])
assert G.shape == (G.shape[0], G.shape[0]), "G is not 2D square"
assert numpy.all(G.T == G), "G is not symmetric."
# speed and speed efficiency describe a scaling factor of dx and dy
# before x and y are adjusted. These are modified as the
# algorithm runs to help ensure convergence.
speed = 1
speed_efficiency = 1
nodes = []
for i in range(0, G.shape[0]):
n = Node()
if node_masses is None:
n.mass = 1 + numpy.count_nonzero(G[i])
else:
n.mass = masses[i]
n.old_dx = 0
n.old_dy = 0
n.dx = 0
n.dy = 0
if pos is None:
n.x = random.random()
n.y = random.random()
else:
n.x = pos[i][0]
n.y = pos[i][1]
nodes.append(n)
edges = []
es = numpy.asarray(G.nonzero()).T
for e in es:
if e[1] <= e[0]: continue # Avoid duplicate edges
edge = Edge()
edge.node1 = e[0] # The index of the first node in `nodes`
edge.node2 = e[1] # The index of the second node in `nodes`
edge.weight = G[tuple(e)]
edges.append(edge)
repulsion = get_repulsion(prevent_overlapping, scaling_ratio)
if strong_gravity_mode:
gravity_force = get_strong_gravity(scaling_ratio)
else:
gravity_force = repulsion
if outbound_attraction_distribution:
outbound_att_compensation = numpy.mean([n.mass for n in nodes])
attraction_coef = outbound_att_compensation if outbound_attraction_distribution else 1
attraction = get_attraction(lin_log_mode, outbound_attraction_distribution, prevent_overlapping,
attraction_coef)
# Main loop
for _i in range(0, iterations):
for n in nodes:
n.old_dx = n.dx
n.old_dy = n.dy
n.dx = 0
n.dy = 0
# Barnes Hut optimization
root_region = None
if barnes_hut_optimize:
root_region = Quadtree(nodes)
root_region.build()
apply_repulsion(repulsion, nodes, barnes_hut_optimize=barnes_hut_optimize, barnes_hut_theta=barnes_hut_theta,
region=root_region)
apply_gravity(gravity_force, nodes, gravity, scaling_ratio)
apply_attraction(attraction, nodes, edges, edge_weight_influence)
# Auto adjust speed.
total_swinging = 0.0 # How much irregular movement
total_effective_traction = 0.0 # How much useful movement
for n in nodes:
swinging = math.sqrt((n.old_dx - n.dx) * (n.old_dx - n.dx) + (n.old_dy - n.dy) * (n.old_dy - n.dy))
total_swinging += n.mass * swinging
total_effective_traction += .5 * n.mass * math.sqrt(
(n.old_dx + n.dx) * (n.old_dx + n.dx) + (n.old_dy + n.dy) * (n.old_dy + n.dy))
# Optimize jitter tolerance.
# The 'right' jitter tolerance for this network.
# Bigger networks need more tolerance. Denser networks need less tolerance.
# Totally empiric.
estimated_optimal_jitter_tolerance = .05 * math.sqrt(len(nodes))
min_jt = math.sqrt(estimated_optimal_jitter_tolerance)
max_jt = 10
jt = jitter_tolerance * max(min_jt, min(max_jt, estimated_optimal_jitter_tolerance * total_effective_traction /
(len(nodes) ** 2)))
min_speed_efficiency = 0.05
# Protective against erratic behavior
if total_swinging / total_effective_traction > 2.0:
if speed_efficiency > min_speed_efficiency:
speed_efficiency *= .5
jt = max(jt, jitter_tolerance)
target_speed = jt * speed_efficiency * total_effective_traction / total_swinging
if total_swinging > jt * total_effective_traction:
if speed_efficiency > min_speed_efficiency:
speed_efficiency *= .7
elif speed < 1000:
speed_efficiency *= 1.3
        # But the speed shouldn't rise too much too quickly, since it would
# make the convergence drop dramatically.
max_rise = .5
speed = speed + min(target_speed - speed, max_rise * speed)
# Apply forces.
if prevent_overlapping:
for n in nodes:
swinging = n.mass * math.sqrt(
(n.old_dx - n.dx) * (n.old_dx - n.dx) + (n.old_dy - n.dy) * (n.old_dy - n.dy))
factor = 0.1 * speed / (1 + math.sqrt(speed * swinging))
df = math.sqrt(math.pow(n.dx, 2) + n.dy ** 2)
factor = min(factor * df, 10.) / df
                n.x = n.x + (n.dx * factor)
                n.y = n.y + (n.dy * factor)
else:
for n in nodes:
swinging = n.mass * math.sqrt(
(n.old_dx - n.dx) * (n.old_dx - n.dx) + (n.old_dy - n.dy) * (n.old_dy - n.dy))
factor = speed / (1.0 + math.sqrt(speed * swinging))
n.x = n.x + (n.dx * factor)
n.y = n.y + (n.dy * factor)
positions = [(n.x, n.y) for n in nodes]
return dict(zip(graph.nodes(), positions))
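Assuming this module is importable next to networkx, usage mirrors the built-in networkx layout functions:

import networkx

graph = networkx.karate_club_graph()
pos = force_atlas2_layout(graph, iterations=200,
                          barnes_hut_optimize=True, gravity=1.0)
# pos maps each node to an (x, y) tuple, e.g. networkx.draw(graph, pos)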
|
Iterate through the nodes or edges and apply the forces directly to the node objects.
def apply_repulsion(repulsion, nodes, barnes_hut_optimize=False, region=None, barnes_hut_theta=1.2):
"""
Iterate through the nodes or edges and apply the forces directly to the node objects.
"""
if not barnes_hut_optimize:
for i in range(0, len(nodes)):
for j in range(0, i):
repulsion.apply_node_to_node(nodes[i], nodes[j])
else:
for i in range(0, len(nodes)):
region.apply_force(nodes[i], repulsion, barnes_hut_theta)
|
Iterate through the nodes or edges and apply the gravity directly to the node objects.
def apply_gravity(repulsion, nodes, gravity, scaling_ratio):
"""
Iterate through the nodes or edges and apply the gravity directly to the node objects.
"""
for i in range(0, len(nodes)):
repulsion.apply_gravitation(nodes[i], gravity / scaling_ratio)
|
Fetch row using primary key -
will use existing object in session if already present
:param external_id:
:param local_user_id:
:param provider_name:
:param db_session:
:return:
def get(cls, external_id, local_user_id, provider_name, db_session=None):
"""
Fetch row using primary key -
will use existing object in session if already present
:param external_id:
:param local_user_id:
:param provider_name:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
return db_session.query(cls.model).get(
[external_id, local_user_id, provider_name]
)
|
Returns ExternalIdentity instance based on search params
:param external_id:
:param provider_name:
:param db_session:
:return: ExternalIdentity
def by_external_id_and_provider(cls, external_id, provider_name, db_session=None):
"""
Returns ExternalIdentity instance based on search params
:param external_id:
:param provider_name:
:param db_session:
:return: ExternalIdentity
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model)
query = query.filter(cls.model.external_id == external_id)
query = query.filter(cls.model.provider_name == provider_name)
return query.first()
|
Returns User instance based on search params
:param external_id:
:param provider_name:
:param db_session:
:return: User
def user_by_external_id_and_provider(
cls, external_id, provider_name, db_session=None
):
"""
Returns User instance based on search params
:param external_id:
:param provider_name:
:param db_session:
:return: User
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.models_proxy.User)
query = query.filter(cls.model.external_id == external_id)
query = query.filter(cls.model.provider_name == provider_name)
query = query.filter(cls.models_proxy.User.id == cls.model.local_user_id)
return query.first()
|
return by user and permission name
:param user_id:
:param perm_name:
:param db_session:
:return:
def by_user_and_perm(cls, user_id, perm_name, db_session=None):
"""
return by user and permission name
:param user_id:
:param perm_name:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.user_id == user_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
|
Checks if cls node has parent with subclass_name.
def node_is_subclass(cls, *subclass_names):
"""Checks if cls node has parent with subclass_name."""
if not isinstance(cls, (ClassDef, Instance)):
return False
# if cls.bases == YES:
# return False
for base_cls in cls.bases:
try:
for inf in base_cls.inferred(): # pragma no branch
if inf.qname() in subclass_names:
return True
if inf != cls and node_is_subclass( # pragma no branch
inf, *subclass_names):
# check up the hierarchy in case we are a subclass of
# a subclass of a subclass ...
return True
except InferenceError: # pragma no cover
continue
return False
|
Checks if a call to a field instance method is valid. A call is
valid if the call is a method of the underlying type. So, in a StringField
the methods from str are valid, in a ListField the methods from list are
valid and so on...
def is_field_method(node):
"""Checks if a call to a field instance method is valid. A call is
valid if the call is a method of the underlying type. So, in a StringField
the methods from str are valid, in a ListField the methods from list are
valid and so on..."""
name = node.attrname
parent = node.last_child()
inferred = safe_infer(parent)
if not inferred:
return False
for cls_name, inst in FIELD_TYPES.items():
if node_is_instance(inferred, cls_name) and hasattr(inst, name):
return True
return False
|
Supposes that node is a mongoengine field in a class and tries to
get its parent class
def get_node_parent_class(node):
"""Supposes that node is a mongoengine field in a class and tries to
get its parent class"""
while node.parent: # pragma no branch
if isinstance(node, ClassDef):
return node
node = node.parent
|
node is a class attribute that is a mongoengine field. Returns
the definition statement for the attribute.
def get_field_definition(node):
""""node is a class attribute that is a mongoengine. Returns
the definition statement for the attribute
"""
name = node.attrname
cls = get_node_parent_class(node)
definition = cls.lookup(name)[1][0].statement()
return definition
|
Returns the ClassDef for the related embedded document in an
embedded document field.
def get_field_embedded_doc(node):
"""Returns de ClassDef for the related embedded document in a
embedded document field."""
definition = get_field_definition(node)
cls_name = definition.last_child().last_child()
cls = next(cls_name.infer())
return cls
|
Checks if a node is a valid field or method in a embedded document.
def node_is_embedded_doc_attr(node):
"""Checks if a node is a valid field or method in a embedded document.
"""
embedded_doc = get_field_embedded_doc(node.last_child())
name = node.attrname
try:
r = bool(embedded_doc.lookup(name)[1][0])
except IndexError:
r = False
return r
|
This is the method in ``SimpleIRCClient`` that all IRC events
get passed through. Here we map events to our own custom
event handlers, and call them.
def _dispatcher(self, connection, event):
"""
This is the method in ``SimpleIRCClient`` that all IRC events
get passed through. Here we map events to our own custom
event handlers, and call them.
"""
super(BaseBot, self)._dispatcher(connection, event)
for handler in self.events[event.eventtype()]:
handler(self, connection, event)
|
We won't receive our own messages, so log them manually.
def message_channel(self, message):
"""
We won't receive our own messages, so log them manually.
"""
self.log(None, message)
super(BaseBot, self).message_channel(message)
|
Log any public messages, and also handle the command event.
def on_pubmsg(self, connection, event):
"""
Log any public messages, and also handle the command event.
"""
for message in event.arguments():
self.log(event, message)
command_args = filter(None, message.split())
command_name = command_args.pop(0)
for handler in self.events["command"]:
if handler.event.args["command"] == command_name:
self.handle_command_event(event, handler, command_args)
|
Command handler - treats each word in the message
that triggered the command as an argument to the command,
and does some validation to ensure that the number of
arguments match.
def handle_command_event(self, event, command, args):
"""
Command handler - treats each word in the message
that triggered the command as an argument to the command,
and does some validation to ensure that the number of
arguments match.
"""
argspec = getargspec(command)
num_all_args = len(argspec.args) - 2 # Ignore self/event args
num_pos_args = num_all_args - len(argspec.defaults or [])
if num_pos_args <= len(args) <= num_all_args:
response = command(self, event, *args)
elif num_all_args == num_pos_args:
s = "s are" if num_all_args != 1 else " is"
response = "%s arg%s required" % (num_all_args, s)
else:
bits = (num_pos_args, num_all_args)
response = "between %s and %s args are required" % bits
response = "%s: %s" % (self.get_nickname(event), response)
self.message_channel(response)
|
Runs each timer handler in a separate greenlet thread.
def handle_timer_event(self, handler):
"""
Runs each timer handler in a separate greenlet thread.
"""
while True:
handler(self)
sleep(handler.event.args["seconds"])
|
Webhook handler - each handler for the webhook event
takes an initial pattern argument for matching the URL
requested. Here we match the URL to the pattern for each
webhook handler, and bail out if it returns a response.
def handle_webhook_event(self, environ, url, params):
"""
Webhook handler - each handler for the webhook event
takes an initial pattern argument for matching the URL
requested. Here we match the URL to the pattern for each
webhook handler, and bail out if it returns a response.
"""
for handler in self.events["webhook"]:
urlpattern = handler.event.args["urlpattern"]
if not urlpattern or match(urlpattern, url):
response = handler(self, environ, url, params)
if response:
return response
|
Create the correct device instance based on device type and return it.
:return: a :class:`Device` or :class:`DeviceGroup` instance.
def DeviceFactory(id, lib=None):
"""Create the correct device instance based on device type and return it.
:return: a :class:`Device` or :class:`DeviceGroup` instance.
"""
lib = lib or Library()
if lib.tdGetDeviceType(id) == const.TELLSTICK_TYPE_GROUP:
return DeviceGroup(id, lib=lib)
return Device(id, lib=lib)
|
Dispatch a single callback in the current thread.
:param boolean block: If True, blocks waiting for a callback to come.
:return: True if a callback was processed; otherwise False.
def process_callback(self, block=True):
"""Dispatch a single callback in the current thread.
:param boolean block: If True, blocks waiting for a callback to come.
:return: True if a callback was processed; otherwise False.
"""
try:
(callback, args) = self._queue.get(block=block)
try:
callback(*args)
finally:
self._queue.task_done()
except queue.Empty:
return False
return True
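A self-contained sketch of the dispatch loop this method implements, with a plain queue of (callback, args) pairs:

import queue

q = queue.Queue()
q.put((print, ('hello',)))

def process_one(block=False):
    try:
        callback, args = q.get(block=block)
        try:
            callback(*args)
        finally:
            q.task_done()
    except queue.Empty:
        return False
    return True

process_one()  # prints 'hello', returns True
process_one()  # queue is empty, returns False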
|
Return all known devices.
:return: list of :class:`Device` or :class:`DeviceGroup` instances.
def devices(self):
"""Return all known devices.
:return: list of :class:`Device` or :class:`DeviceGroup` instances.
"""
devices = []
count = self.lib.tdGetNumberOfDevices()
for i in range(count):
device = DeviceFactory(self.lib.tdGetDeviceId(i), lib=self.lib)
devices.append(device)
return devices
|
Return all known sensors.
:return: list of :class:`Sensor` instances.
def sensors(self):
"""Return all known sensors.
:return: list of :class:`Sensor` instances.
"""
sensors = []
try:
while True:
sensor = self.lib.tdSensor()
sensors.append(Sensor(lib=self.lib, **sensor))
except TelldusError as e:
if e.error != const.TELLSTICK_ERROR_DEVICE_NOT_FOUND:
raise
return sensors
|
Return all known controllers.
Requires Telldus core library version >= 2.1.2.
:return: list of :class:`Controller` instances.
def controllers(self):
"""Return all known controllers.
Requires Telldus core library version >= 2.1.2.
:return: list of :class:`Controller` instances.
"""
controllers = []
try:
while True:
controller = self.lib.tdController()
del controller["name"]
del controller["available"]
controllers.append(Controller(lib=self.lib, **controller))
except TelldusError as e:
if e.error != const.TELLSTICK_ERROR_NOT_FOUND:
raise
return controllers
|
Add a new device.
:return: a :class:`Device` or :class:`DeviceGroup` instance.
def add_device(self, name, protocol, model=None, **parameters):
"""Add a new device.
:return: a :class:`Device` or :class:`DeviceGroup` instance.
"""
device = Device(self.lib.tdAddDevice(), lib=self.lib)
try:
device.name = name
device.protocol = protocol
if model:
device.model = model
for key, value in parameters.items():
device.set_parameter(key, value)
# Return correct type
return DeviceFactory(device.id, lib=self.lib)
except Exception:
import sys
exc_info = sys.exc_info()
try:
device.remove()
except:
pass
if "with_traceback" in dir(Exception):
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
else:
exec("raise exc_info[0], exc_info[1], exc_info[2]")
|
Add a new device group.
:return: a :class:`DeviceGroup` instance.
def add_group(self, name, devices):
"""Add a new device group.
:return: a :class:`DeviceGroup` instance.
"""
device = self.add_device(name, "group")
device.add_to_group(devices)
return device
|
Connect a controller.
def connect_controller(self, vid, pid, serial):
"""Connect a controller."""
self.lib.tdConnectTellStickController(vid, pid, serial)
|
Disconnect a controller.
def disconnect_controller(self, vid, pid, serial):
"""Disconnect a controller."""
self.lib.tdDisconnectTellStickController(vid, pid, serial)
|
Get dict with all set parameters.
def parameters(self):
"""Get dict with all set parameters."""
parameters = {}
for name in self.PARAMETERS:
try:
parameters[name] = self.get_parameter(name)
except AttributeError:
pass
return parameters
|
Get a parameter.
def get_parameter(self, name):
"""Get a parameter."""
default_value = "$%!)(INVALID)(!%$"
value = self.lib.tdGetDeviceParameter(self.id, name, default_value)
if value == default_value:
raise AttributeError(name)
return value
|
Set a parameter.
def set_parameter(self, name, value):
"""Set a parameter."""
self.lib.tdSetDeviceParameter(self.id, name, str(value))
|
Add device(s) to the group.
def add_to_group(self, devices):
"""Add device(s) to the group."""
ids = {d.id for d in self.devices_in_group()}
ids.update(self._device_ids(devices))
self._set_group(ids)
|
Remove device(s) from the group.
def remove_from_group(self, devices):
"""Remove device(s) from the group."""
ids = {d.id for d in self.devices_in_group()}
ids.difference_update(self._device_ids(devices))
self._set_group(ids)
|
Fetch list of devices in group.
def devices_in_group(self):
"""Fetch list of devices in group."""
try:
devices = self.get_parameter('devices')
except AttributeError:
return []
ctor = DeviceFactory
return [ctor(int(x), lib=self.lib) for x in devices.split(',') if x]
|
Return the :class:`SensorValue` for the given data type.
sensor.value(TELLSTICK_TEMPERATURE) is identical to calling
sensor.temperature().
def value(self, datatype):
"""Return the :class:`SensorValue` for the given data type.
sensor.value(TELLSTICK_TEMPERATURE) is identical to calling
sensor.temperature().
"""
value = self.lib.tdSensorValue(
self.protocol, self.model, self.id, datatype)
return SensorValue(datatype, value['value'], value['timestamp'])
|
Move any off curves at the end of the contour
to the beginning of the contour. This makes
segmentation easier.
def _prepPointsForSegments(points):
"""
Move any off curves at the end of the contour
to the beginning of the contour. This makes
segmentation easier.
"""
while 1:
point = points[-1]
if point.segmentType:
break
else:
point = points.pop()
points.insert(0, point)
continue
break
|
Reverse the points. This differs from the
reversal point pen in RoboFab in that it doesn't
worry about maintaining the start point position.
That has no benefit within the context of this module.
def _reversePoints(points):
"""
Reverse the points. This differs from the
reversal point pen in RoboFab in that it doesn't
    worry about maintaining the start point position.
That has no benefit within the context of this module.
"""
# copy the points
points = _copyPoints(points)
# find the first on curve type and recycle
# it for the last on curve type
firstOnCurve = None
for index, point in enumerate(points):
if point.segmentType is not None:
firstOnCurve = index
break
lastSegmentType = points[firstOnCurve].segmentType
# reverse the points
points = reversed(points)
# work through the reversed remaining points
final = []
for point in points:
segmentType = point.segmentType
if segmentType is not None:
point.segmentType = lastSegmentType
lastSegmentType = segmentType
final.append(point)
# move any offcurves at the end of the points
# to the start of the points
_prepPointsForSegments(final)
# done
return final
|
Compile points into InputSegment objects.
def _convertPointsToSegments(points, willBeReversed=False):
"""
Compile points into InputSegment objects.
"""
# get the last on curve
previousOnCurve = None
for point in reversed(points):
if point.segmentType is not None:
previousOnCurve = point.coordinates
break
assert previousOnCurve is not None
# gather the segments
offCurves = []
segments = []
for point in points:
# off curve, hold.
if point.segmentType is None:
offCurves.append(point)
else:
segment = InputSegment(
points=offCurves + [point],
previousOnCurve=previousOnCurve,
willBeReversed=willBeReversed
)
segments.append(segment)
offCurves = []
previousOnCurve = point.coordinates
assert not offCurves
return segments
|
Finds a t value on a curve from a point.
The point must originally be a point on the curve.
This only back-traces the t value needed to split the curve into parts.
def _tValueForPointOnCubicCurve(point, cubicCurve, isHorizontal=0):
"""
Finds a t value on a curve from a point.
    The point must originally be a point on the curve.
    This only back-traces the t value needed to split the curve into parts.
"""
pt1, pt2, pt3, pt4 = cubicCurve
a, b, c, d = bezierTools.calcCubicParameters(pt1, pt2, pt3, pt4)
solutions = bezierTools.solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
d[isHorizontal] - point[isHorizontal])
solutions = [t for t in solutions if 0 <= t < 1]
if not solutions and not isHorizontal:
        # it can happen that a horizontal line doesn't intersect; try the vertical
return _tValueForPointOnCubicCurve(point, (pt1, pt2, pt3, pt4), isHorizontal=1)
if len(solutions) > 1:
        intersectionLengths = {}
        for t in solutions:
            tp = _getCubicPoint(t, pt1, pt2, pt3, pt4)
            dist = _distance(tp, point)
            intersectionLengths[dist] = t
        minDist = min(intersectionLengths.keys())
        solutions = [intersectionLengths[minDist]]
return solutions
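A round-trip check of this technique using fontTools (already a dependency here, via bezierTools): evaluate a point at a known t, then recover t from the x coordinate. The cubic evaluator is written inline rather than using the module's private _getCubicPoint.
from fontTools.misc import bezierTools

def cubic_point(t, p0, p1, p2, p3):
    # evaluate the polynomial form a*t^3 + b*t^2 + c*t + d per axis
    (ax, ay), (bx, by), (cx, cy), (dx, dy) = bezierTools.calcCubicParameters(p0, p1, p2, p3)
    return (ax * t**3 + bx * t**2 + cx * t + dx,
            ay * t**3 + by * t**2 + cy * t + dy)

curve = ((0, 0), (30, 100), (70, 100), (100, 0))
pt = cubic_point(0.4, *curve)
a, b, c, d = bezierTools.calcCubicParameters(*curve)
roots = bezierTools.solveCubic(a[0], b[0], c[0], d[0] - pt[0])
print([round(t, 4) for t in roots if 0 <= t <= 1])  # [0.4]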
|
Scale points and optionally convert them to integers.
def _scalePoints(points, scale=1, convertToInteger=True):
"""
Scale points and optionally convert them to integers.
"""
if convertToInteger:
points = [
(int(round(x * scale)), int(round(y * scale)))
for (x, y) in points
]
else:
points = [(x * scale, y * scale) for (x, y) in points]
return points
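Usage sketch for the helper above, e.g. when scaling by a units-per-em ratio and snapping to the grid:
pts = [(10.2, 20.8), (30.5, 40.5)]
print(_scalePoints(pts, scale=0.5))                          # [(5, 10), (15, 20)]
print(_scalePoints(pts, scale=0.5, convertToInteger=False))  # [(5.1, 10.4), (15.25, 20.25)]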
|
Scale a single point.
def _scaleSinglePoint(point, scale=1, convertToInteger=True):
"""
    Scale a single point.
"""
x, y = point
if convertToInteger:
return int(round(x * scale)), int(round(y * scale))
else:
return (x * scale, y * scale)
|
Flatten the curve segment into a list of points.
The first and last points in the segment must be
on curves. The returned list of points will not
include the first on curve point.
False curves (where the off curves are not any
different from the on curves) must not be sent here.
Duplicate points must not be sent here.
duplicate points must not be sent here.
def _flattenSegment(segment, approximateSegmentLength=_approximateSegmentLength):
"""
    Flatten the curve segment into a list of points.
    The first and last points in the segment must be
    on curves. The returned list of points will not
    include the first on curve point.
    False curves (where the off curves are not any
    different from the on curves) must not be sent here.
    Duplicate points must not be sent here.
"""
onCurve1, offCurve1, offCurve2, onCurve2 = segment
if _pointOnLine(onCurve1, onCurve2, offCurve1) and _pointOnLine(onCurve1, onCurve2, offCurve2):
return [onCurve2]
est = _estimateCubicCurveLength(onCurve1, offCurve1, offCurve2, onCurve2) / approximateSegmentLength
flat = []
minStep = 0.1564
step = 1.0 / est
if step > .3:
step = minStep
t = step
while t < 1:
pt = _getCubicPoint(t, onCurve1, offCurve1, offCurve2, onCurve2)
        # skip points that are collinear with the incoming or outgoing handle line
if not _pointOnLine(offCurve2, onCurve2, pt) and not _pointOnLine(onCurve1, offCurve1, pt):
flat.append(pt)
t += step
flat.append(onCurve2)
return flat
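The core sampling technique in a standalone sketch (fixed t stepping derived from an estimated arc length; the module's clamping of the step size is omitted):
def flatten_cubic(p0, p1, p2, p3, segment_length=10.0, estimate_steps=10):
    def point_at(t):
        mt = 1 - t
        return (mt**3*p0[0] + 3*mt**2*t*p1[0] + 3*mt*t**2*p2[0] + t**3*p3[0],
                mt**3*p0[1] + 3*mt**2*t*p1[1] + 3*mt*t**2*p2[1] + t**3*p3[1])
    # crude polyline length estimate, as in _estimateCubicCurveLength below
    samples = [point_at(i / estimate_steps) for i in range(estimate_steps + 1)]
    length = sum(((b[0]-a[0])**2 + (b[1]-a[1])**2) ** 0.5
                 for a, b in zip(samples, samples[1:]))
    steps = max(1, int(length / segment_length))
    return [point_at(i / steps) for i in range(1, steps + 1)]

flat = flatten_cubic((0, 0), (30, 100), (70, 100), (100, 0))
print(len(flat), flat[-1])  # the final point is the on-curve end point (100.0, 0.0)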
|
Estimate the length of this curve by iterating
through it and summing the lengths of the flat bits.
def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10):
"""
    Estimate the length of this curve by iterating
    through it and summing the lengths of the flat bits.
"""
points = []
length = 0
step = 1.0 / precision
factors = range(0, precision + 1)
for i in factors:
points.append(_getCubicPoint(i * step, pt0, pt1, pt2, pt3))
for i in range(len(points) - 1):
pta = points[i]
ptb = points[i + 1]
length += _distance(pta, ptb)
return length
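A sanity check for the estimator, assuming the module's _getCubicPoint and _distance helpers are in scope: a degenerate cubic lying on a straight line reports the exact length at any precision, since all sampled points are collinear.
line_as_cubic = ((0, 0), (25, 0), (75, 0), (100, 0))
for precision in (2, 10, 50):
    print(precision, _estimateCubicCurveLength(*line_as_cubic, precision=precision))
# every precision reports ~100.0 for this straight segment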
|
(Point, Point) -> Point
Return the point that lies halfway between the two input points.
def _mid(pt1, pt2):
"""
(Point, Point) -> Point
    Return the point that lies halfway between the two input points.
"""
(x0, y0), (x1, y1) = pt1, pt2
return 0.5 * (x0 + x1), 0.5 * (y0 + y1)
|
Split the segment according to the t values
def split(self, tValues):
"""
    Split the segment according to the t values
"""
if self.segmentType == "curve":
on1 = self.previousOnCurve
off1 = self.points[0].coordinates
off2 = self.points[1].coordinates
on2 = self.points[2].coordinates
return bezierTools.splitCubicAtT(on1, off1, off2, on2, *tValues)
elif self.segmentType == "line":
segments = []
x1, y1 = self.previousOnCurve
x2, y2 = self.points[0].coordinates
dx = x2 - x1
dy = y2 - y1
pp = x1, y1
for t in tValues:
np = (x1+dx*t, y1+dy*t)
segments.append([pp, np])
pp = np
segments.append([pp, (x2, y2)])
return segments
elif self.segmentType == "qcurve":
raise NotImplementedError
else:
raise NotImplementedError
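What the "curve" branch delegates to: fontTools' splitCubicAtT returns one four-point cubic per resulting piece, so splitting at two t values yields three segments.
from fontTools.misc.bezierTools import splitCubicAtT

pieces = splitCubicAtT((0, 0), (30, 100), (70, 100), (100, 0), 0.25, 0.75)
for piece in pieces:
    print(piece)  # three cubics covering t in [0, .25], [.25, .75], [.75, 1]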
|
Get the t values for a given point.
Required:
the point must be a point on the curve.
In an overlap case the point will be an intersection point, which is always a point on the curve.
def tValueForPoint(self, point):
"""
    Get the t values for a given point.
    Required:
    the point must be a point on the curve.
    In an overlap case the point will be an intersection point, which is always a point on the curve.
"""
if self.segmentType == "curve":
on1 = self.previousOnCurve
off1 = self.points[0].coordinates
off2 = self.points[1].coordinates
on2 = self.points[2].coordinates
return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))
elif self.segmentType == "line":
return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))
elif self.segmentType == "qcurve":
raise NotImplementedError
else:
raise NotImplementedError
|
Return a list of normalized InputPoint objects
for the contour drawn with this pen.
def getData(self):
"""
Return a list of normalized InputPoint objects
for the contour drawn with this pen.
"""
# organize the points into segments
# 1. make sure there is an on curve
haveOnCurve = False
for point in self._points:
if point.segmentType is not None:
haveOnCurve = True
break
# 2. move the off curves to front of the list
if haveOnCurve:
_prepPointsForSegments(self._points)
# 3. ignore double points on start and end
firstPoint = self._points[0]
lastPoint = self._points[-1]
if firstPoint.segmentType is not None and lastPoint.segmentType is not None:
if firstPoint.coordinates == lastPoint.coordinates:
if (firstPoint.segmentType in ["line", "move"]):
del self._points[0]
else:
raise AssertionError("Unhandled point type sequence")
# done
return self._points
|
Match if entire input contour matches entire output contour,
allowing for different start point.
def reCurveFromEntireInputContour(self, inputContour):
"""
Match if entire input contour matches entire output contour,
allowing for different start point.
"""
if self.clockwise:
inputFlat = inputContour.clockwiseFlat
else:
inputFlat = inputContour.counterClockwiseFlat
outputFlat = []
for segment in self.segments:
# XXX this could be expensive
assert segment.segmentType == "flat"
outputFlat += segment.points
# test lengths
haveMatch = False
if len(inputFlat) == len(outputFlat):
if inputFlat == outputFlat:
haveMatch = True
else:
inputStart = inputFlat[0]
if inputStart in outputFlat:
                # there should be only one occurrence of the point
# but handle it just in case
if outputFlat.count(inputStart) > 1:
startIndexes = [index for index, point in enumerate(outputFlat) if point == inputStart]
else:
startIndexes = [outputFlat.index(inputStart)]
# slice and dice to test possible orders
for startIndex in startIndexes:
test = outputFlat[startIndex:] + outputFlat[:startIndex]
if inputFlat == test:
haveMatch = True
break
if haveMatch:
# clear out the flat points
self.segments = []
# replace with the appropriate points from the input
if self.clockwise:
inputSegments = inputContour.clockwiseSegments
else:
inputSegments = inputContour.counterClockwiseSegments
for inputSegment in inputSegments:
self.segments.append(
OutputSegment(
segmentType=inputSegment.segmentType,
points=[
OutputPoint(
coordinates=point.coordinates,
segmentType=point.segmentType,
smooth=point.smooth,
name=point.name,
kwargs=point.kwargs
)
for point in inputSegment.points
],
final=True
)
)
inputSegment.used = True
# reset the direction of the final contour
self.clockwise = inputContour.clockwise
return True
return False
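The start-point-agnostic comparison above boils down to a rotation test. A standalone sketch of that idea: two point lists match if one is a rotation of the other.
def is_rotation(a, b):
    if len(a) != len(b):
        return False
    for i, pt in enumerate(b):
        if pt == a[0] and b[i:] + b[:i] == a:
            return True
    return a == b == []

print(is_rotation([(0, 0), (1, 0), (1, 1)], [(1, 0), (1, 1), (0, 0)]))  # True
print(is_rotation([(0, 0), (1, 0), (1, 1)], [(1, 1), (1, 0), (0, 0)]))  # False (reversed)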
|
Checks if a function definition is a queryset manager created
with the @queryset_manager decorator.
def _is_custom_qs_manager(funcdef):
"""Checks if a function definition is a queryset manager created
with the @queryset_manager decorator."""
decors = getattr(funcdef, 'decorators', None)
if decors:
for dec in decors.get_children():
try:
if dec.name == 'queryset_manager': # pragma no branch
return True
except AttributeError:
continue
return False
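A quick test drive using astroid, the AST library pylint checkers build on (and where the funcdef/decorators nodes above come from); parsing is static, so the decorator never has to be importable.
import astroid

module = astroid.parse('''
def plain():
    pass

@queryset_manager
def objs(doc_cls, queryset):
    return queryset
''')
print(_is_custom_qs_manager(module.body[0]))  # False
print(_is_custom_qs_manager(module.body[1]))  # True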
|
Checks if the call is being done to a custom queryset manager.
def _is_call2custom_manager(node):
"""Checks if the call is being done to a custom queryset manager."""
called = safe_infer(node.func)
funcdef = getattr(called, '_proxied', None)
return _is_custom_qs_manager(funcdef)
|
Checks if the attribute is a valid attribute for a queryset manager.
def _is_custom_manager_attribute(node):
"""Checks if the attribute is a valid attribute for a queryset manager.
"""
attrname = node.attrname
if not name_is_from_qs(attrname):
return False
for attr in node.get_children():
inferred = safe_infer(attr)
funcdef = getattr(inferred, '_proxied', None)
if _is_custom_qs_manager(funcdef):
return True
return False
|
Return the row matching the given group id and permission name.
:param group_id: id of the group
:param perm_name: name of the permission
:param db_session: optional db session to use
:return: first matching permission or None
def by_group_and_perm(cls, group_id, perm_name, db_session=None):
"""
    Return the row matching the given group id and permission name.
    :param group_id: id of the group
    :param perm_name: name of the permission
    :param db_session: optional db session to use
    :return: first matching permission or None
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.group_id == group_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
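The same filter-chain pattern against a throwaway in-memory database; the model is a stand-in for ziggurat_foundations' real one, in SQLAlchemy 1.4+ style.
import sqlalchemy as sa
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class GroupPermission(Base):
    __tablename__ = "group_permissions"
    id = sa.Column(sa.Integer, primary_key=True)
    group_id = sa.Column(sa.Integer)
    perm_name = sa.Column(sa.String)

engine = sa.create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(GroupPermission(group_id=1, perm_name="edit"))
    session.commit()
    query = session.query(GroupPermission).filter(GroupPermission.group_id == 1)
    query = query.filter(GroupPermission.perm_name == "edit")
    print(query.first().perm_name)  # edit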
|
Starts the gevent-socketio server.
def serve_forever(django=False):
"""
Starts the gevent-socketio server.
"""
logger = getLogger("irc.dispatch")
logger.setLevel(settings.LOG_LEVEL)
logger.addHandler(StreamHandler())
app = IRCApplication(django)
server = SocketIOServer((settings.HTTP_HOST, settings.HTTP_PORT), app)
print "%s [Bot: %s] listening on %s:%s" % (
settings.GNOTTY_VERSION_STRING,
app.bot.__class__.__name__,
settings.HTTP_HOST,
settings.HTTP_PORT,
)
server.serve_forever()
|
Attempts to shut down a previously started daemon.
def kill(pid_file):
"""
Attempts to shut down a previously started daemon.
"""
try:
with open(pid_file) as f:
os.kill(int(f.read()), 9)
os.remove(pid_file)
except (IOError, OSError):
return False
return True
|
CLI entry point. Parses args and starts the gevent-socketio server.
def run():
"""
CLI entry point. Parses args and starts the gevent-socketio server.
"""
settings.parse_args()
pid_name = "gnotty-%s-%s.pid" % (settings.HTTP_HOST, settings.HTTP_PORT)
pid_file = settings.PID_FILE or os.path.join(gettempdir(), pid_name)
if settings.KILL:
if kill(pid_file):
print "Daemon killed"
else:
print "Could not kill any daemons"
return
elif kill(pid_file):
print "Running daemon killed"
if settings.DAEMON:
daemonize(pid_file)
serve_forever()
|
A WebSocket session has started - create a greenlet to host
the IRC client, and start it.
def on_start(self, host, port, channel, nickname, password):
"""
A WebSocket session has started - create a greenlet to host
the IRC client, and start it.
"""
self.client = WebSocketIRCClient(host, port, channel, nickname,
password, self)
self.spawn(self.client.start)
|
WebSocket was disconnected - leave the IRC channel.
def disconnect(self, *args, **kwargs):
"""
WebSocket was disconnected - leave the IRC channel.
"""
quit_message = "%s %s" % (settings.GNOTTY_VERSION_STRING,
settings.GNOTTY_PROJECT_URL)
self.client.connection.quit(quit_message)
super(IRCNamespace, self).disconnect(*args, **kwargs)
|
Thread (greenlet) that will try and reconnect the bot if
it's not connected.
def bot_watcher(self):
"""
Thread (greenlet) that will try and reconnect the bot if
it's not connected.
"""
default_interval = 5
interval = default_interval
while True:
if not self.bot.connection.connected:
if self.bot.reconnect():
interval = default_interval
else:
interval *= 2
sleep(interval)
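The backoff schedule in isolation: the sleep interval doubles after every failed reconnect and snaps back to the base once one succeeds. A pure sketch of the arithmetic, fed a sequence of reconnect outcomes:
def backoff_intervals(outcomes, base=5):
    interval = base
    for reconnected in outcomes:
        interval = base if reconnected else interval * 2
        yield interval  # what bot_watcher passes to sleep()

print(list(backoff_intervals([False, False, True, False])))  # [10, 20, 5, 10]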
|
Passes the request onto a bot with a webhook if the webhook
path is requested.
def respond_webhook(self, environ):
"""
Passes the request onto a bot with a webhook if the webhook
path is requested.
"""
request = FieldStorage(fp=environ["wsgi.input"], environ=environ)
url = environ["PATH_INFO"]
params = dict([(k, request[k].value) for k in request])
try:
if self.bot is None:
raise NotImplementedError
response = self.bot.handle_webhook_event(environ, url, params)
except NotImplementedError:
return 404
        except Exception:
self.logger.debug(format_exc())
return 500
return response or 200
|
Serves a static file when Django isn't being used.
def respond_static(self, environ):
"""
Serves a static file when Django isn't being used.
"""
path = os.path.normpath(environ["PATH_INFO"])
if path == "/":
content = self.index()
content_type = "text/html"
else:
path = os.path.join(os.path.dirname(__file__), path.lstrip("/"))
try:
with open(path, "r") as f:
content = f.read()
except IOError:
return 404
content_type = guess_type(path)[0]
return (200, [("Content-Type", content_type)], content)
|
Loads the chat interface template when Django isn't being
used, manually dealing with the Django template bits.
def index(self):
"""
Loads the chat interface template when Django isn't being
used, manually dealing with the Django template bits.
"""
root_dir = os.path.dirname(__file__)
template_dir = os.path.join(root_dir, "templates", "gnotty")
with open(os.path.join(template_dir, "base.html"), "r") as f:
base = f.read()
with open(os.path.join(template_dir, "chat.html"), "r") as f:
base = base.replace("{% block content %}", f.read())
replace = {
"{% block content %}": "",
"{% block extrahead %}": "",
"{% endblock %}": "",
"{% load gnotty_tags %}": "",
"{% extends \"gnotty/base.html\" %}": "",
"{% url gnotty_chat %}": "/",
"{% gnotty_nav %}": "",
"{% templatetag openvariable %}": "{{",
"{% templatetag closevariable %}": "}}",
}
for k, v in replace.items():
base = base.replace(k, v)
for k, v in settings.items():
base = base.replace("{{ %s }}" % k, unicode(v or ""))
return base
|
If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set
to ``True``, pull the session cookie from the environment and
validate that the user is authenticated.
def authorized(self, environ):
"""
If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set
to ``True``, pull the session cookie from the environment and
validate that the user is authenticated.
"""
if self.django and settings.LOGIN_REQUIRED:
try:
from django.conf import settings as django_settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.core.exceptions import ObjectDoesNotExist
cookie = SimpleCookie(environ["HTTP_COOKIE"])
cookie_name = django_settings.SESSION_COOKIE_NAME
session_key = cookie[cookie_name].value
session = Session.objects.get(session_key=session_key)
user_id = session.get_decoded().get(SESSION_KEY)
user = User.objects.get(id=user_id)
except (ImportError, KeyError, ObjectDoesNotExist):
return False
return True
|
Write your forwards methods here.
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
if not db.dry_run:
orm['gnotty.IRCMessage'].objects.filter(message="joins").update(join_or_leave=True)
orm['gnotty.IRCMessage'].objects.filter(message="leaves").update(join_or_leave=True)
|
Returns permission tuples that match one of the passed permission names
perm_names - list of permissions that can be matched
user_ids - restrict to specific users
group_ids - restrict to specific groups
resource_ids - restrict to specific resources
limit_group_permissions - should be used if we do not want user objects
returned for group permissions; fetching them might cause performance
issues for big groups
def resource_permissions_for_users(
models_proxy,
perm_names,
resource_ids=None,
user_ids=None,
group_ids=None,
resource_types=None,
limit_group_permissions=False,
skip_user_perms=False,
skip_group_perms=False,
db_session=None,
):
"""
    Returns permission tuples that match one of the passed permission names
    perm_names - list of permissions that can be matched
    user_ids - restrict to specific users
    group_ids - restrict to specific groups
    resource_ids - restrict to specific resources
    limit_group_permissions - should be used if we do not want user objects
    returned for group permissions; fetching them might cause performance
    issues for big groups
"""
db_session = get_db_session(db_session)
# fetch groups and their permissions (possibly with users belonging
# to group if needed)
query = db_session.query(
models_proxy.GroupResourcePermission.perm_name,
models_proxy.User,
models_proxy.Group,
sa.literal("group").label("type"),
models_proxy.Resource,
)
query = query.join(
models_proxy.Group,
models_proxy.Group.id == models_proxy.GroupResourcePermission.group_id,
)
query = query.join(
models_proxy.Resource,
models_proxy.Resource.resource_id
== models_proxy.GroupResourcePermission.resource_id,
)
if limit_group_permissions:
query = query.outerjoin(models_proxy.User, models_proxy.User.id == None) # noqa
else:
query = query.join(
models_proxy.UserGroup,
models_proxy.UserGroup.group_id
== models_proxy.GroupResourcePermission.group_id,
)
query = query.outerjoin(
models_proxy.User, models_proxy.User.id == models_proxy.UserGroup.user_id
)
if resource_ids:
query = query.filter(
models_proxy.GroupResourcePermission.resource_id.in_(resource_ids)
)
if resource_types:
query = query.filter(models_proxy.Resource.resource_type.in_(resource_types))
if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names:
query = query.filter(
models_proxy.GroupResourcePermission.perm_name.in_(perm_names)
)
if group_ids:
query = query.filter(
models_proxy.GroupResourcePermission.group_id.in_(group_ids)
)
if user_ids and not limit_group_permissions:
query = query.filter(models_proxy.UserGroup.user_id.in_(user_ids))
# 2nd query that will fetch users with direct resource permissions
query2 = db_session.query(
models_proxy.UserResourcePermission.perm_name,
models_proxy.User,
models_proxy.Group,
sa.literal("user").label("type"),
models_proxy.Resource,
)
query2 = query2.join(
models_proxy.User,
models_proxy.User.id == models_proxy.UserResourcePermission.user_id,
)
query2 = query2.join(
models_proxy.Resource,
models_proxy.Resource.resource_id
== models_proxy.UserResourcePermission.resource_id,
)
    # the Group entity must be selected so the union column shapes line up,
    # but it never actually matches
query2 = query2.outerjoin(models_proxy.Group, models_proxy.Group.id == None) # noqa
if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names:
query2 = query2.filter(
models_proxy.UserResourcePermission.perm_name.in_(perm_names)
)
if resource_ids:
query2 = query2.filter(
models_proxy.UserResourcePermission.resource_id.in_(resource_ids)
)
if resource_types:
query2 = query2.filter(models_proxy.Resource.resource_type.in_(resource_types))
if user_ids:
query2 = query2.filter(
models_proxy.UserResourcePermission.user_id.in_(user_ids)
)
if not skip_group_perms and not skip_user_perms:
query = query.union(query2)
elif skip_group_perms:
query = query2
users = [
PermissionTuple(
row.User,
row.perm_name,
row.type,
row.Group or None,
row.Resource,
False,
True,
)
for row in query
]
return users
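The union trick above in miniature: two queries share one column shape, each tagging its rows with a literal "type" column, then combine into a single result set. Stand-in models, SQLAlchemy 1.4+ style; row order after UNION is not guaranteed.
import sqlalchemy as sa
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class UserPerm(Base):
    __tablename__ = "user_perms"
    id = sa.Column(sa.Integer, primary_key=True)
    perm_name = sa.Column(sa.String)

class GroupPerm(Base):
    __tablename__ = "group_perms"
    id = sa.Column(sa.Integer, primary_key=True)
    perm_name = sa.Column(sa.String)

engine = sa.create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([UserPerm(perm_name="edit"), GroupPerm(perm_name="view")])
    session.commit()
    q1 = session.query(GroupPerm.perm_name, sa.literal("group").label("type"))
    q2 = session.query(UserPerm.perm_name, sa.literal("user").label("type"))
    for perm_name, type_ in q1.union(q2):
        print(perm_name, type_)  # ('view', 'group') and ('edit', 'user')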
|