def committed(self):
"""
Called after the database is brought into a consistent state with this
object.
"""
if self.__deleting:
self.deleted()
if not self.__legacy__:
self.store.objectCache.uncache(self.storeID, self)
self.__store = None
self.__justCreated = False
|
def checkpoint(self):
"""
Update the database to reflect in-memory changes made to this item; for
example, to make it show up in store.query() calls where it is now
valid, but was not the last time it was persisted to the database.
This is called automatically when in 'autocommit mode' (i.e. not in a
transaction) and at the end of each transaction for every object that
has been changed.
"""
if self.store is None:
raise NotInStore("You can't checkpoint %r: not in a store" % (self,))
if self.__deleting:
if not self.__everInserted:
# don't issue duplicate SQL and crap; we were created, then
# destroyed immediately.
return
self.store.executeSQL(self._baseDeleteSQL(self.store), [self.storeID])
# re-using OIDs plays havoc with the cache, and with other things
# as well. We need to make sure that we leave a placeholder row at
# the end of the table.
if self.__deletingObject:
# Mark this object as dead.
self.store.executeSchemaSQL(_schema.CHANGE_TYPE,
[-1, self.storeID])
# Can't do this any more:
# self.store.executeSchemaSQL(_schema.DELETE_OBJECT, [self.storeID])
# TODO: need to measure the performance impact of this, then do
# it to make sure things are in fact deleted:
# self.store.executeSchemaSQL(_schema.APP_VACUUM)
else:
assert self.__legacy__
# we're done...
if self.store.autocommit:
self.committed()
return
if self.__everInserted:
# case 1: we've been inserted before, either previously in this
# transaction or we were loaded from the db
if not self.__dirty__:
# we might have been checkpointed twice within the same
# transaction; just don't do anything.
return
self.store.executeSQL(*self._updateSQL())
else:
# case 2: we are in the middle of creating the object, we've never
# been inserted into the db before
schemaAttrs = self.getSchema()
insertArgs = [self.storeID]
for (ignoredName, attrObj) in schemaAttrs:
attrObjDuplicate, attributeValue = self.__dirty__[attrObj.attrname]
# assert attrObjDuplicate is attrObj
insertArgs.append(attributeValue)
# XXX this isn't atomic, gross.
self.store.executeSQL(self._baseInsertSQL(self.store), insertArgs)
self.__everInserted = True
# In case 1, we're dirty but we did an update, synchronizing the
# database, in case 2, we haven't been created but we issue an insert.
# In either case, the code in attributes.py sets the attribute *as well
# as* populating __dirty__, so we clear out dirty and we keep the same
# value, knowing it's the same as what's in the db.
self.__dirty__.clear()
if self.store.autocommit:
self.committed()
|
def registerUpgrader(upgrader, typeName, oldVersion, newVersion):
"""
Register a callable which can perform a schema upgrade between two
particular versions.
@param upgrader: A one-argument callable which will upgrade an object. It
is invoked with an instance of the old version of the object.
@param typeName: The database typename for which this is an upgrader.
@param oldVersion: The version from which this will upgrade.
@param newVersion: The version to which this will upgrade. This must be
exactly one greater than C{oldVersion}.
"""
# assert (typeName, oldVersion, newVersion) not in _upgradeRegistry, "duplicate upgrader"
# ^ this makes the tests blow up so it's just disabled for now; perhaps we
# should have a specific test mode
# assert newVersion - oldVersion == 1, "read the doc string"
assert isinstance(typeName, str), "read the doc string"
_upgradeRegistry[typeName, oldVersion] = upgrader
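For illustration, registering a one-step upgrade for a hypothetical type might look like this (a sketch; the 'blog_post' typename and the upgrader body are assumptions, not from the source):

def upgradeBlog1to2(old):
    # One-argument callable: receives the version-1 item and returns
    # the upgraded item via Item.upgradeVersion, as used elsewhere here.
    return old.upgradeVersion('blog_post', 1, 2, title=old.title)

registerUpgrader(upgradeBlog1to2, 'blog_post', 1, 2)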
|
def registerAttributeCopyingUpgrader(itemType, fromVersion, toVersion, postCopy=None):
"""
Register an upgrader for C{itemType}, from C{fromVersion} to C{toVersion},
which will copy all attributes from the legacy item to the new item. If
postCopy is provided, it will be called with the new item after upgrading.
@param itemType: L{axiom.item.Item} subclass
@param postCopy: a callable of one argument
@return: None
"""
def upgrader(old):
newitem = old.upgradeVersion(itemType.typeName, fromVersion, toVersion,
**dict((str(name), getattr(old, name))
for (name, _) in old.getSchema()))
if postCopy is not None:
postCopy(newitem)
return newitem
registerUpgrader(upgrader, itemType.typeName, fromVersion, toVersion)
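A usage sketch, assuming a hypothetical C{Photo} item whose version 2 added a flag that needs initializing after the attribute copy:

def _initThumbnailFlag(newPhoto):
    # invoked with the already-upgraded item
    newPhoto.thumbnailed = False

registerAttributeCopyingUpgrader(Photo, 1, 2, postCopy=_initThumbnailFlag)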
|
def registerDeletionUpgrader(itemType, fromVersion, toVersion):
"""
Register an upgrader for C{itemType}, from C{fromVersion} to C{toVersion},
which will delete the item from the database.
@param itemType: L{axiom.item.Item} subclass
@return: None
"""
# XXX This should actually do something more special so that a new table is
# not created and such.
def upgrader(old):
old.deleteFromStore()
return None
registerUpgrader(upgrader, itemType.typeName, fromVersion, toVersion)
|
def _hasExplicitOid(store, table):
"""
Does the given table have an explicit oid column?
"""
return any(info[1] == 'oid' for info
in store.querySchemaSQL(
'PRAGMA *DATABASE*.table_info({})'.format(table)))
|
def _upgradeTableOid(store, table, createTable, postCreate=lambda: None):
"""
Upgrade a table to have an explicit oid.
Must be called in a transaction to avoid corrupting the database.
"""
if _hasExplicitOid(store, table):
return
store.executeSchemaSQL(
'ALTER TABLE *DATABASE*.{0} RENAME TO {0}_temp'.format(table))
createTable()
store.executeSchemaSQL(
'INSERT INTO *DATABASE*.{0} '
'SELECT oid, * FROM *DATABASE*.{0}_temp'.format(table))
store.executeSchemaSQL('DROP TABLE *DATABASE*.{0}_temp'.format(table))
postCreate()
|
def upgradeSystemOid(store):
"""
Upgrade the system tables to use explicit oid columns.
"""
store.transact(
_upgradeTableOid, store, 'axiom_types',
lambda: store.executeSchemaSQL(CREATE_TYPES))
store.transact(
_upgradeTableOid, store, 'axiom_objects',
lambda: store.executeSchemaSQL(CREATE_OBJECTS),
lambda: store.executeSchemaSQL(CREATE_OBJECTS_IDX))
|
def upgradeExplicitOid(store):
"""
Upgrade a store to use explicit oid columns.
This allows VACUUMing the database without corrupting it.
This requires copying all of axiom_objects and axiom_types, as well as all
item tables that have not yet been upgraded. Consider VACUUMing the
database afterwards to reclaim space.
"""
upgradeSystemOid(store)
for typename, version in store.querySchemaSQL(LATEST_TYPES):
cls = _typeNameToMostRecentClass[typename]
if cls.schemaVersion != version:
remaining = store.querySQL(
'SELECT oid FROM {} LIMIT 1'.format(
store._tableNameFor(typename, version)))
if len(remaining) == 0:
# Nothing to upgrade
continue
else:
raise RuntimeError(
'{}:{} not fully upgraded to {}'.format(
typename, version, cls.schemaVersion))
store.transact(
_upgradeTableOid,
store,
store._tableNameOnlyFor(typename, version),
lambda: store._justCreateTable(cls),
lambda: store._createIndexesFor(cls, []))
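A sketch of the intended call pattern; the function manages its own transactions, and the trailing VACUUM (suggested by the docstring) is an assumption about how a caller would reclaim the space afterwards:

upgradeExplicitOid(store)
store.executeSQL('VACUUM')  # assumed to run outside any transaction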
|
def checkUpgradePaths(self):
"""
Check that all of the accumulated old Item types have a way to get
from their current version to the latest version.
@raise axiom.errors.NoUpgradePathAvailable: for any, and all, Items
that do not have a valid upgrade path
"""
cantUpgradeErrors = []
for oldVersion in self._oldTypesRemaining:
# We have to be able to get from oldVersion.schemaVersion to
# the most recent type.
currentType = _typeNameToMostRecentClass.get(
oldVersion.typeName, None)
if currentType is None:
# There isn't a current version of this type; it's entirely
# legacy, will be upgraded by deleting and replacing with
# something else.
continue
typeInQuestion = oldVersion.typeName
upgver = oldVersion.schemaVersion
while upgver < currentType.schemaVersion:
# Do we have enough of the schema present to upgrade?
if ((typeInQuestion, upgver)
not in _upgradeRegistry):
cantUpgradeErrors.append(
"No upgrader present for %s (%s) from %d to %d" % (
typeInQuestion, qual(currentType), upgver,
upgver + 1))
# Is there a type available for each upgrader version?
if upgver+1 != currentType.schemaVersion:
if (typeInQuestion, upgver+1) not in _legacyTypes:
cantUpgradeErrors.append(
"Type schema required for upgrade missing:"
" %s version %d" % (
typeInQuestion, upgver+1))
upgver += 1
if cantUpgradeErrors:
raise NoUpgradePathAvailable('\n '.join(cantUpgradeErrors))
|
def queueTypeUpgrade(self, oldtype):
"""
Queue a type upgrade for C{oldtype}.
"""
if oldtype not in self._oldTypesRemaining:
self._oldTypesRemaining.append(oldtype)
|
def upgradeItem(self, thisItem):
"""
Upgrade a legacy item.
@raise axiom.errors.UpgraderRecursion: If the given item is already in
the process of being upgraded.
"""
sid = thisItem.storeID
if sid in self._currentlyUpgrading:
raise UpgraderRecursion()
self._currentlyUpgrading[sid] = thisItem
try:
return upgradeAllTheWay(thisItem)
finally:
self._currentlyUpgrading.pop(sid)
|
def upgradeBatch(self, n):
"""
Upgrade the entire store in batches, yielding after each batch.
@param n: Number of upgrades to perform per transaction
@type n: C{int}
@raise axiom.errors.ItemUpgradeError: if an item upgrade failed
@return: A generator that yields after each batch upgrade. This needs
to be consumed for upgrading to actually take place.
"""
store = self.store
def _doBatch(itemType):
upgradedAnything = False
for theItem in store.query(itemType, limit=n):
upgradedAnything = True
try:
self.upgradeItem(theItem)
except:
f = Failure()
raise ItemUpgradeError(
f, theItem.storeID, itemType,
_typeNameToMostRecentClass[itemType.typeName])
return upgradedAnything
if self.upgradesPending:
didAny = False
while self._oldTypesRemaining:
t0 = self._oldTypesRemaining[0]
upgradedAnything = store.transact(_doBatch, t0)
if not upgradedAnything:
self._oldTypesRemaining.pop(0)
if didAny:
msg("%s finished upgrading %s" % (store.dbdir.path, qual(t0)))
continue
elif not didAny:
didAny = True
msg("%s beginning upgrade..." % (store.dbdir.path,))
yield None
if didAny:
msg("%s completely upgraded." % (store.dbdir.path,))
|
def open(self):
"""
Obtains the lvm, vg_t and lv_t handle. Usually you would never need to use this
method unless you are doing operations using the ctypes function wrappers in
conversion.py
*Raises:*
* HandleError
"""
self.vg.open()
self.__lvh = lvm_lv_from_uuid(self.vg.handle, self.uuid)
if not bool(self.__lvh):
raise HandleError("Failed to initialize LV Handle.")
|
def name(self):
"""
Returns the logical volume name.
"""
self.open()
name = lvm_lv_get_name(self.__lvh)
self.close()
return name
|
def is_active(self):
"""
Returns True if the logical volume is active, False otherwise.
"""
self.open()
active = lvm_lv_is_active(self.__lvh)
self.close()
return bool(active)
|
def is_suspended(self):
"""
Returns True if the logical volume is suspended, False otherwise.
"""
self.open()
susp = lvm_lv_is_suspended(self.__lvh)
self.close()
return bool(susp)
|
def size(self, units="MiB"):
"""
Returns the logical volume size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_lv_get_size(self.__lvh)
self.close()
return size_convert(size, units)
|
def activate(self):
"""
Activates the logical volume.
*Raises:*
* HandleError
"""
self.open()
a = lvm_lv_activate(self.handle)
self.close()
if a != 0:
raise CommitError("Failed to activate LV.")
|
def deactivate(self):
"""
Deactivates the logical volume.
*Raises:*
* HandleError
"""
self.open()
d = lvm_lv_deactivate(self.handle)
self.close()
if d != 0:
raise CommitError("Failed to deactivate LV.")
|
def open(self):
"""
Obtains the lvm, vg_t and pv_t handle. Usually you would never need to use this
method unless you are doing operations using the ctypes function wrappers in
conversion.py
*Raises:*
* HandleError
"""
self.vg.open()
self.__pvh = lvm_pv_from_uuid(self.vg.handle, self.uuid)
if not bool(self.__pvh):
raise HandleError("Failed to initialize PV Handle.")
|
def name(self):
"""
Returns the physical volume device path.
"""
self.open()
name = lvm_pv_get_name(self.handle)
self.close()
return name
|
def mda_count(self):
"""
Returns the physical volume mda count.
"""
self.open()
mda = lvm_pv_get_mda_count(self.handle)
self.close()
return mda
|
def size(self, units="MiB"):
"""
Returns the physical volume size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_pv_get_size(self.handle)
self.close()
return size_convert(size, units)
|
def dev_size(self, units="MiB"):
"""
Returns the device size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_pv_get_dev_size(self.handle)
self.close()
return size_convert(size, units)
|
def free(self, units="MiB"):
"""
Returns the free size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_pv_get_free(self.handle)
self.close()
return size_convert(size, units)
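The physical-volume accessors follow the same open/query/close pattern; a sketch of computing utilisation, assuming C{pv} is a constructed PV object:

total = pv.size(units="MiB")
free = pv.free(units="MiB")
used_pct = 100.0 * (total - free) / total if total else 0.0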
|
def mongoengine_validate_wrapper(old_clean, new_clean):
"""
A wrapper function to validate formdata against mongoengine-field
validator and raise a proper django.forms ValidationError if there
are any problems.
"""
def inner_validate(value):
value = old_clean(value)
try:
new_clean(value)
return value
except ValidationError, e:
raise forms.ValidationError(e)
return inner_validate
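A sketch of applying the wrapper while building a form, assuming C{form_field} is a generated django form field and C{mongo_field} the corresponding mongoengine field (whose C{validate} method raises on bad values):

# chain django's clean with the mongoengine validator
form_field.clean = mongoengine_validate_wrapper(
    form_field.clean, mongo_field.validate)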
|
def iter_valid_fields(meta):
"""walk through the available valid fields.."""
# fetch field configuration and always add the id_field as exclude
meta_fields = getattr(meta, 'fields', ())
meta_exclude = getattr(meta, 'exclude', ())
meta_exclude += (meta.document._meta.get('id_field'),)
# walk through meta_fields or through the document fields to keep
# meta_fields order in the form
if meta_fields:
for field_name in meta_fields:
field = meta.document._fields.get(field_name)
if field:
yield (field_name, field)
else:
for field_name, field in meta.document._fields.iteritems():
# skip excluded fields
if field_name not in meta_exclude:
yield (field_name, field)
|
def dependsOn(itemType, itemCustomizer=None, doc='',
indexed=True, whenDeleted=reference.NULLIFY):
"""
This function behaves like L{axiom.attributes.reference} but with
an extra behaviour: when this item is installed (via
L{axiom.dependency.installOn}) on a target item, the
type named here will be instantiated and installed on the target
as well.
For example::
class Foo(Item):
counter = integer()
thingIDependOn = dependsOn(Baz, lambda baz: baz.setup())
@param itemType: The Item class to instantiate and install.
@param itemCustomizer: A callable that accepts the item installed
as a dependency as its first argument. It will be called only if
an item is created to satisfy this dependency.
@return: An L{axiom.attributes.reference} instance.
"""
frame = sys._getframe(1)
locals = frame.f_locals
# Try to make sure we were called from a class def.
if (locals is frame.f_globals) or ('__module__' not in locals):
raise TypeError("dependsOn can be used only from a class definition.")
ref = reference(reftype=itemType, doc=doc, indexed=indexed, allowNone=True,
whenDeleted=whenDeleted)
if "__dependsOn_advice_data__" not in locals:
addClassAdvisor(_dependsOn_advice)
locals.setdefault('__dependsOn_advice_data__', []).append(
(itemType, itemCustomizer, ref))
return ref
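Complementing the docstring example, installing an item declared this way also creates and installs its dependencies (a sketch; C{installOn} is the function referenced above, while the store C{s} and the items are hypothetical):

from axiom.dependency import installOn
foo = Foo(store=s)
installOn(foo, s)  # creates a Baz, runs the customizer, installs both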
|
def uninstallFrom(self, target):
"""
Remove this object from the target, as well as any dependencies
that it automatically installed which were not explicitly
"pinned" by calling "install", and raising an exception if
anything still depends on this.
"""
#did this class powerup on any interfaces? powerdown if so.
target.powerDown(self)
for dc in self.store.query(_DependencyConnector,
_DependencyConnector.target==target):
if dc.installee is self:
dc.deleteFromStore()
for item in installedUniqueRequirements(self, target):
uninstallFrom(item, target)
callback = getattr(self, "uninstalled", None)
if callback is not None:
callback()
|
def installedOn(self):
"""
If this item is installed on another item, return the install
target. Otherwise return None.
"""
try:
return self.store.findUnique(_DependencyConnector,
_DependencyConnector.installee == self
).target
except ItemNotFound:
return None
|
def installedDependents(self, target):
"""
Return an iterable of things installed on the target that
require this item.
"""
for dc in self.store.query(_DependencyConnector,
_DependencyConnector.target == target):
depends = dependentsOf(dc.installee.__class__)
if self.__class__ in depends:
yield dc.installee
|
def installedUniqueRequirements(self, target):
"""
Return an iterable of things installed on the target that this item
requires and are not required by anything else.
"""
myDepends = dependentsOf(self.__class__)
#XXX optimize?
for dc in self.store.query(_DependencyConnector,
_DependencyConnector.target==target):
if dc.installee is self:
#we're checking all the others not ourself
continue
depends = dependentsOf(dc.installee.__class__)
if self.__class__ in depends:
raise DependencyError(
"%r cannot be uninstalled from %r, "
"%r still depends on it" % (self, target, dc.installee))
for cls in myDepends[:]:
#If one of my dependencies is required by somebody
#else, leave it alone
if cls in depends:
myDepends.remove(cls)
for dc in self.store.query(_DependencyConnector,
_DependencyConnector.target==target):
if (dc.installee.__class__ in myDepends
and not dc.explicitlyInstalled):
yield dc.installee
|
def installedRequirements(self, target):
"""
Return an iterable of things installed on the target that this
item requires.
"""
myDepends = dependentsOf(self.__class__)
for dc in self.store.query(_DependencyConnector,
_DependencyConnector.target == target):
if dc.installee.__class__ in myDepends:
yield dc.installee
|
def storeServiceSpecialCase(st, pups):
"""
Adapt a store to L{IServiceCollection}.
@param st: The L{Store} to adapt.
@param pups: A list of L{IServiceCollection} powerups on C{st}.
@return: An L{IServiceCollection} which has all of C{pups} as children.
"""
if st.parent is not None:
# If for some bizarre reason we're starting a substore's service, let's
# just assume that its parent is running its upgraders, rather than
# risk starting the upgrader run twice. (XXX: it *IS* possible to
# figure out whether we need to or not, I just doubt this will ever
# even happen in practice -- fix here if it does)
return serviceSpecialCase(st, pups)
if st._axiom_service is not None:
# not new, don't add twice.
return st._axiom_service
collection = serviceSpecialCase(st, pups)
st._upgradeService.setServiceParent(collection)
if st.dbdir is not None:
from axiom import batch
batcher = batch.BatchProcessingControllerService(st)
batcher.setServiceParent(collection)
scheduler = iaxiom.IScheduler(st)
# If it's an old database, we might get a SubScheduler instance. It has no
# setServiceParent method.
setServiceParent = getattr(scheduler, 'setServiceParent', None)
if setServiceParent is not None:
setServiceParent(collection)
return collection
|
def _schedulerServiceSpecialCase(empowered, pups):
"""
This function creates (or returns a previously created) L{IScheduler}
powerup.
If L{IScheduler} powerups were found on C{empowered}, the first of those
is given priority. Otherwise, a site L{Store} or a user L{Store} will
have any pre-existing L{IScheduler} powerup associated with them (on the
hackish cache attribute C{_schedulerService}) returned, or a new one
created if none exists already.
"""
from axiom.scheduler import _SiteScheduler, _UserScheduler
# Give precedence to anything found in the store
for pup in pups:
return pup
# If the empowered is a store, construct a scheduler for it.
if isinstance(empowered, Store):
if getattr(empowered, '_schedulerService', None) is None:
if empowered.parent is None:
sched = _SiteScheduler(empowered)
else:
sched = _UserScheduler(empowered)
empowered._schedulerService = sched
return empowered._schedulerService
return None
|
def _diffSchema(diskSchema, memorySchema):
"""
Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences.
"""
diskSchema = set(diskSchema)
memorySchema = set(memorySchema)
diskOnly = diskSchema - memorySchema
memoryOnly = memorySchema - diskSchema
diff = []
if diskOnly:
diff.append('Only on disk:')
diff.extend(map(repr, diskOnly))
if memoryOnly:
diff.append('Only in memory:')
diff.extend(map(repr, memoryOnly))
return '\n'.join(diff)
|
def close(self):
"""
Close this file and commit it to its permanent location.
@return: a Deferred which fires when the file has been moved (and
backed up to tertiary storage, if necessary).
"""
now = time.time()
try:
file.close(self)
_mkdirIfNotExists(self._destpath.dirname())
self.finalpath = self._destpath
os.rename(self.name, self.finalpath.path)
os.utime(self.finalpath.path, (now, now))
except:
return defer.fail()
return defer.succeed(self.finalpath)
|
def explain(self):
"""
A debugging API, exposing SQLite's I{EXPLAIN} statement.
While this is not a private method, you also probably don't have any
use for it unless you understand U{SQLite
opcodes<http://www.sqlite.org/opcode.html>} very well.
Once you do, it can be handy to call this interactively to get a sense
of the complexity of a query.
@return: a list, the first element of which is a L{str} (the SQL
statement which will be run), and the remainder of which is 3-tuples
resulting from the I{EXPLAIN} of that statement.
"""
return ([self._sqlAndArgs('SELECT', self._queryTarget)[0]] +
self._runQuery('EXPLAIN SELECT', self._queryTarget))
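A sketch of interactive use, assuming C{q} is an existing query object:

plan = q.explain()
sql = plan[0]       # the SELECT statement that would be executed
opcodes = plan[1:]  # 3-tuples from SQLite's EXPLAIN of that statement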
|
def _computeFromClause(self, tables):
"""
Generate the SQL string which follows the "FROM" string and before the
"WHERE" string in the final SQL statement.
"""
tableAliases = []
self.fromClauseParts = []
for table in tables:
# The indirect calls to store.getTableName() will create the tables
# if needed. (XXX That's bad, actually. They should get created
# some other way if necessary. -exarkun)
tableName = table.getTableName(self.store)
tableAlias = table.getTableAlias(self.store, tuple(tableAliases))
if tableAlias is None:
self.fromClauseParts.append(tableName)
else:
tableAliases.append(tableAlias)
self.fromClauseParts.append('%s AS %s' % (tableName,
tableAlias))
self.sortClauseParts = []
for attr, direction in self.sort.orderColumns():
assert direction in ('ASC', 'DESC'), "%r not in ASC,DESC" % (direction,)
if attr.type not in tables:
raise ValueError(
"Ordering references type excluded from comparison")
self.sortClauseParts.append(
'%s %s' % (attr.getColumnName(self.store), direction))
|
def _selectStuff(self, verb='SELECT'):
"""
Return a generator which yields the massaged results of this query with
a particular SQL verb.
For an attribute query, massaged results are of the type of that
attribute. For an item query, they are items of the type the query is
supposed to return.
@param verb: a str containing the SQL verb to execute. This really
must be some variant of 'SELECT', the only two currently implemented
being 'SELECT' and 'SELECT DISTINCT'.
"""
sqlResults = self._runQuery(verb, self._queryTarget)
for row in sqlResults:
yield self._massageData(row)
|
def next(self):
"""
This method is deprecated, a holdover from when queries were iterators,
rather than iterables.
@return: one element of massaged data.
"""
if self._selfiter is None:
warnings.warn(
"Calling 'next' directly on a query is deprecated. "
"Perhaps you want to use iter(query).next(), or something "
"more expressive like store.findFirst or store.findOrCreate?",
DeprecationWarning, stacklevel=2)
self._selfiter = self.__iter__()
return self._selfiter.next()
|
def paginate(self, pagesize=20):
"""
Split up the work of gathering a result set into multiple smaller
'pages', allowing very large queries to be iterated without blocking
for long periods of time.
While simply iterating C{paginate()} is very similar to iterating a
query directly, using this method allows the work of obtaining the
results to be performed on demand, over a series of different
transactions.
@param pagesize: the number of results gathered in each chunk of work.
(This is mostly for testing paginate's implementation.)
@type pagesize: L{int}
@return: an iterable which yields all the results of this query.
"""
sort = self.sort
oc = list(sort.orderColumns())
if not oc:
# You can't have an unsorted pagination.
sort = self.tableClass.storeID.ascending
oc = list(sort.orderColumns())
if len(oc) != 1:
raise RuntimeError("%d-column sorts not supported yet with paginate" %(len(oc),))
sortColumn = oc[0][0]
if oc[0][1] == 'ASC':
sortOp = operator.gt
else:
sortOp = operator.lt
if _isColumnUnique(sortColumn):
# This is the easy case. There is never a tie to be broken, so we
# can just remember our last value and yield from there. Right now
# this only happens when the column is a storeID, but hopefully in
# the future we will have more of this.
tiebreaker = None
else:
tiebreaker = self.tableClass.storeID
tied = lambda a, b: (sortColumn.__get__(a) ==
sortColumn.__get__(b))
def _AND(a, b):
if a is None:
return b
return attributes.AND(a, b)
results = list(self.store.query(self.tableClass, self.comparison,
sort=sort, limit=pagesize + 1))
while results:
if len(results) == 1:
# XXX TODO: reject 0 pagesize. If the length of the result set
# is 1, there's no next result to test for a tie with, so we
# must be at the end, and we should just yield the result and finish.
yield results[0]
return
for resultidx in range(len(results) - 1):
# check for a tie.
result = results[resultidx]
nextResult = results[resultidx + 1]
if tied(result, nextResult):
# Yield any ties first, in the appropriate order.
lastTieBreaker = tiebreaker.__get__(result)
# Note that this query is _NOT_ limited: currently large ties
# will generate arbitrarily large amounts of work.
trq = self.store.query(
self.tableClass,
_AND(self.comparison,
sortColumn == sortColumn.__get__(result)))
tiedResults = list(trq)
# sort ties by (sort column, tiebreaker); the key must use the lambda
# parameter, not the outer loop variable, or the sort is a no-op
tiedResults.sort(key=lambda rslt: (sortColumn.__get__(rslt),
tiebreaker.__get__(rslt)))
for result in tiedResults:
yield result
# re-start the query here ('result' is set to the
# appropriate value by the inner loop)
break
else:
yield result
lastSortValue = sortColumn.__get__(result) # hooray namespace pollution
results = list(self.store.query(
self.tableClass,
_AND(self.comparison,
sortOp(sortColumn,
sortColumn.__get__(result))),
sort=sort,
limit=pagesize + 1))
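A usage sketch, assuming C{Vehicle} is an Item subclass as in the earlier examples; each page is fetched by a fresh query, so the work is spread across transactions:

q = store.query(Vehicle, sort=Vehicle.maxKPH.descending)
for vehicle in q.paginate(pagesize=50):
    handle(vehicle)  # 'handle' stands in for per-item work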
|
def _massageData(self, row):
"""
Convert a row into an Item instance by loading cached items or
creating new ones based on query results.
@param row: an n-tuple, where n is the number of columns specified by
my item type.
@return: an instance of the type specified by this query.
"""
result = self.store._loadedItem(self.tableClass, row[0], row[1:])
assert result.store is not None, "result %r has funky store" % (result,)
return result
|
def getColumn(self, attributeName, raw=False):
"""
Get an L{iaxiom.IQuery} whose results will be values of a single
attribute rather than an Item.
@param attributeName: a L{str}, the name of a Python attribute, that
describes a column on the Item subclass that this query was specified
for.
@return: an L{AttributeQuery} for the column described by the attribute
named L{attributeName} on the item class that this query's results will
be instances of.
"""
# XXX: 'raw' is undocumented because I think it's completely unused,
# and it's definitely untested. It should probably be removed when
# someone has the time. -glyph
# Quotient POP3 server uses it. Not that it shouldn't be removed.
# ;) -exarkun
attr = getattr(self.tableClass, attributeName)
return AttributeQuery(self.store,
self.tableClass,
self.comparison,
self.limit,
self.offset,
self.sort,
attr,
raw)
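A sketch pairing C{getColumn} with the aggregate helpers defined further down, assuming C{Quote} is an Item subclass with an integer C{amount} attribute:

amounts = store.query(Quote).getColumn("amount")
total = amounts.sum()  # AttributeQuery.sum, shown below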
|
def deleteFromStore(self):
"""
Delete all the Items which are found by this query.
"""
if (self.limit is None and
not isinstance(self.sort, attributes.UnspecifiedOrdering)):
# The ORDER BY is pointless here, and SQLite complains about it.
return self.cloneQuery(sort=None).deleteFromStore()
#We can do this the fast way or the slow way.
# If there's a 'deleted' callback on the Item type or 'deleteFromStore'
# is overridden, we have to do it the slow way.
deletedOverridden = (
self.tableClass.deleted.im_func is not item.Item.deleted.im_func)
deleteFromStoreOverridden = (
self.tableClass.deleteFromStore.im_func is not
item.Item.deleteFromStore.im_func)
if deletedOverridden or deleteFromStoreOverridden:
for it in self:
it.deleteFromStore()
else:
# Find other item types whose instances need to be deleted
# when items of the type in this query are deleted, and
# remove them from the store.
def itemsToDelete(attr):
return attr.oneOf(self.getColumn("storeID"))
if not item.allowDeletion(self.store, self.tableClass, itemsToDelete):
raise errors.DeletionDisallowed(
'Cannot delete item; '
'has referents with whenDeleted == reference.DISALLOW')
for it in item.dependentItems(self.store,
self.tableClass, itemsToDelete):
it.deleteFromStore()
# actually run the DELETE for the items in this query.
self._runQuery('DELETE', "")
|
def _involvedTables(self):
"""
Return a list of tables involved in this query,
first checking that no required tables (those in
the query target) have been omitted from the comparison.
"""
# SQL and arguments
if self.comparison is not None:
tables = self.comparison.getInvolvedTables()
self.args = self.comparison.getArgs(self.store)
else:
tables = list(self.tableClass)
self.args = []
for tableClass in self.tableClass:
if tableClass not in tables:
raise ValueError(
"Comparison omits required reference to result type %s"
% tableClass.typeName)
return tables
|
def _massageData(self, row):
"""
Convert a row into a tuple of Item instances, by slicing it
according to the number of columns for each instance, and then
proceeding as for ItemQuery._massageData.
@param row: an n-tuple, where n is the total number of columns
specified by all the item types in this query.
@return: a tuple of instances of the types specified by this query.
"""
offset = 0
resultBits = []
for i, tableClass in enumerate(self.tableClass):
numAttrs = self.schemaLengths[i]
result = self.store._loadedItem(self.tableClass[i],
row[offset],
row[offset+1:offset+numAttrs])
assert result.store is not None, "result %r has funky store" % (result,)
resultBits.append(result)
offset += numAttrs
return tuple(resultBits)
|
def cloneQuery(self, limit=_noItem, sort=_noItem):
"""
Clone the original query which this distinct query wraps, and return a new
wrapper around that clone.
"""
newq = self.query.cloneQuery(limit=limit, sort=sort)
return self.__class__(newq)
|
def count(self):
"""
Count the number of distinct results of the wrapped query.
@return: an L{int} representing the number of distinct results.
"""
if not self.query.store.autocommit:
self.query.store.checkpoint()
target = ', '.join([
tableClass.storeID.getColumnName(self.query.store)
for tableClass in self.query.tableClass ])
sql, args = self.query._sqlAndArgs(
'SELECT DISTINCT',
target)
sql = 'SELECT COUNT(*) FROM (' + sql + ')'
result = self.query.store.querySQL(sql, args)
assert len(result) == 1, 'more than one result: %r' % (result,)
return result[0][0] or 0
|
def _massageData(self, row):
"""
Convert a raw database row to the type described by an attribute. For
example, convert a database integer into an L{extime.Time} instance for
an L{attributes.timestamp} attribute.
@param row: a 1-tuple, containing the in-database value from my
attribute.
@return: a value of the type described by my attribute.
"""
if self.raw:
return row[0]
return self.attribute.outfilter(row[0], _FakeItemForFilter(self.store))
|
def sum(self):
"""
Return the sum of all the values returned by this query. If no results
are specified, return None.
Note: for non-numeric column types the result of this method will be
nonsensical.
@return: a number or None.
"""
res = self._runQuery('SELECT', 'SUM(%s)' % (self._queryTarget,)) or [(0,)]
assert len(res) == 1, "more than one result: %r" % (res,)
dbval = res[0][0] or 0
return self.attribute.outfilter(dbval, _FakeItemForFilter(self.store))
|
def average(self):
"""
Return the average value (as defined by the AVG implementation in the
database) of the values specified by this query.
Note: for non-numeric column types the result of this method will be
nonsensical.
@return: a L{float} representing the 'average' value of this column.
"""
rslt = self._runQuery('SELECT', 'AVG(%s)' % (self._queryTarget,)) or [(0,)]
assert len(rslt) == 1, 'more than one result: %r' % (rslt,)
return rslt[0][0]
|
def _attachChild(self, child):
"attach a child database, returning an identifier for it"
self._childCounter += 1
databaseName = 'child_db_%d' % (self._childCounter,)
self._attachedChildren[databaseName] = child
# ATTACH DATABASE statements can't use bind parameters, blech.
self.executeSQL("ATTACH DATABASE '%s' AS %s" % (
child.dbdir.child('db.sqlite').path,
databaseName,))
return databaseName
|
def _startup(self):
"""
Called during __init__. Check consistency of schema in database with
classes in memory. Load all Python modules for stored items, and load
version information for upgrader service to run later.
"""
typesToCheck = []
for oid, module, typename, version in self.querySchemaSQL(_schema.ALL_TYPES):
if self.debug:
print
print 'SCHEMA:', oid, module, typename, version
if typename not in _typeNameToMostRecentClass:
try:
namedAny(module)
except ValueError as err:
raise ImportError('cannot find module ' + module, str(err))
self.typenameAndVersionToID[typename, version] = oid
# Can't call this until typenameAndVersionToID is populated, since this
# depends on building a reverse map of that.
persistedSchema = self._loadTypeSchema()
# Now that we have persistedSchema, loop over everything again and
# prepare old types.
for (typename, version), typeID in self.typenameAndVersionToID.iteritems():
cls = _typeNameToMostRecentClass.get(typename)
if cls is not None:
if version != cls.schemaVersion:
typesToCheck.append(
self._prepareOldVersionOf(
typename, version, persistedSchema))
else:
typesToCheck.append(cls)
for cls in typesToCheck:
self._checkTypeSchemaConsistency(cls, persistedSchema)
# Schema is consistent! Now, if I forgot to create any indexes last
# time I saw this table, do it now...
extantIndexes = self._loadExistingIndexes()
for cls in typesToCheck:
self._createIndexesFor(cls, extantIndexes)
self._upgradeManager.checkUpgradePaths()
|
def findOrCreate(self, userItemClass, __ifnew=None, **attrs):
"""
Usage::
s.findOrCreate(userItemClass [, function] [, x=1, y=2, ...])
Example::
class YourItemType(Item):
a = integer()
b = text()
c = integer()
def f(x):
print x, \"-- it's new!\"
s.findOrCreate(YourItemType, f, a=1, b=u'2')
Search for an item with columns in the database that match the passed
set of keyword arguments, returning the first match if one is found,
creating one with the given attributes if not. Takes an optional
positional argument function to call on the new item if it is new.
"""
andargs = []
for k, v in attrs.iteritems():
col = getattr(userItemClass, k)
andargs.append(col == v)
if len(andargs) == 0:
cond = []
elif len(andargs) == 1:
cond = [andargs[0]]
else:
cond = [attributes.AND(*andargs)]
for result in self.query(userItemClass, *cond):
return result
newItem = userItemClass(store=self, **attrs)
if __ifnew is not None:
__ifnew(newItem)
return newItem
|
def newFile(self, *path):
"""
Open a new file somewhere in this Store's file area.
@param path: a sequence of path segments.
@return: an L{AtomicFile}.
"""
assert len(path) > 0, "newFile requires a nonzero number of segments"
if self.dbdir is None:
if self.filesdir is None:
raise RuntimeError("This in-memory store has no file directory")
else:
tmpbase = self.filesdir
else:
tmpbase = self.dbdir
tmpname = tmpbase.child('temp').child(str(tempCounter.next()) + ".tmp")
return AtomicFile(tmpname.path, self.newFilePath(*path))
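A usage sketch; the path segments and data are hypothetical, and per the C{close} method shown earlier the returned Deferred fires once the file is renamed into place:

f = store.newFile('files', 'avatars', 'alice.png')
f.write(imageBytes)
d = f.close()  # Deferred firing with the committed path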
|
def _loadTypeSchema(self):
"""
Load all of the stored schema information for all types known by this
store. It's important to load everything all at once (rather than
loading the schema for each type separately as it is needed) to keep
store opening fast. A single query with many results is much faster
than many queries with a few results each.
@return: A dict with two-tuples of item type name and schema version as
keys and lists of five-tuples of attribute schema information for
that type. The elements of the five-tuple are::
- a string giving the name of the Python attribute
- a string giving the SQL type
- a boolean indicating whether the attribute is indexed
- the Python attribute type object (eg, axiom.attributes.integer)
- a string giving documentation for the attribute
"""
# Oops, need an index going the other way. This only happens once per
# store open, and it's based on data queried from the store, so there
# doesn't seem to be any broader way to cache and re-use the result.
# However, if we keyed the resulting dict on the database typeID rather
# than (typeName, schemaVersion), we wouldn't need the information this
# dict gives us. That would mean changing the callers of this function
# to use typeID instead of that tuple, which may be possible. Probably
# only represents a very tiny possible speedup.
typeIDToNameAndVersion = {}
for key, value in self.typenameAndVersionToID.iteritems():
typeIDToNameAndVersion[value] = key
# Indexing attribute, ordering by it, and getting rid of row_offset
# from the schema and the sorted() here doesn't seem to be any faster
# than doing this.
persistedSchema = sorted(self.querySchemaSQL(
"SELECT attribute, type_id, sqltype, indexed, "
"pythontype, docstring FROM *DATABASE*.axiom_attributes "))
# This is trivially (but measurably!) faster than getattr(attributes,
# pythontype).
getAttribute = attributes.__dict__.__getitem__
result = {}
for (attribute, typeID, sqltype, indexed, pythontype,
docstring) in persistedSchema:
key = typeIDToNameAndVersion[typeID]
if key not in result:
result[key] = []
result[key].append((
attribute, sqltype, indexed,
getAttribute(pythontype), docstring))
return result
|
def _checkTypeSchemaConsistency(self, actualType, onDiskSchema):
"""
Called for all known types at database startup: make sure that what we
know (in memory) about this type agrees with what is stored about this
type in the database.
@param actualType: A L{MetaItem} instance which is associated with a
table in this store. The schema it defines in memory will be
checked against the schema known in the database to ensure they
agree.
@param onDiskSchema: A mapping from L{MetaItem} instances (such as
C{actualType}) to the schema known in the database and associated
with C{actualType}.
@raise RuntimeError: if the schema defined by C{actualType} does not
match the database-present schema given in C{onDiskSchema} or if
C{onDiskSchema} contains a newer version of the schema associated
with C{actualType} than C{actualType} represents.
"""
# make sure that the runtime and the database both know about this
# type; if either doesn't, we can't check that their views are
# consistent
try:
inMemorySchema = _inMemorySchemaCache[actualType]
except KeyError:
inMemorySchema = _inMemorySchemaCache[actualType] = [
(storedAttribute.attrname, storedAttribute.sqltype)
for (name, storedAttribute) in actualType.getSchema()]
key = (actualType.typeName, actualType.schemaVersion)
persistedSchema = [(storedAttribute[0], storedAttribute[1])
for storedAttribute in onDiskSchema[key]]
if inMemorySchema != persistedSchema:
raise RuntimeError(
"Schema mismatch on already-loaded %r <%r> object version %d:\n%s" %
(actualType, actualType.typeName, actualType.schemaVersion,
_diffSchema(persistedSchema, inMemorySchema)))
if actualType.__legacy__:
return
if (key[0], key[1] + 1) in onDiskSchema:
raise RuntimeError(
"Memory version of %r is %d; database has newer" % (
actualType.typeName, key[1]))
|
def _prepareOldVersionOf(self, typename, version, persistedSchema):
"""
Note that this database contains old versions of a particular type.
Create the appropriate dummy item subclass and queue the type to be
upgraded.
@param typename: The I{typeName} associated with the schema for which
to create a dummy item class.
@param version: The I{schemaVersion} of the old version of the schema
for which to create a dummy item class.
@param persistedSchema: A mapping giving information about all schemas
stored in the database, used to create the attributes of the dummy
item class.
"""
appropriateSchema = persistedSchema[typename, version]
# create actual attribute objects
dummyAttributes = {}
for (attribute, sqlType, indexed, pythontype,
docstring) in appropriateSchema:
atr = pythontype(indexed=indexed, doc=docstring)
dummyAttributes[attribute] = atr
dummyBases = []
oldType = declareLegacyItem(
typename, version, dummyAttributes, dummyBases)
self._upgradeManager.queueTypeUpgrade(oldType)
return oldType
|
def findUnique(self, tableClass, comparison=None, default=_noItem):
"""
Find an Item in the database which should be unique. If it is found,
return it. If it is not found, return 'default' if it was passed,
otherwise raise L{errors.ItemNotFound}. If more than one item is
found, raise L{errors.DuplicateUniqueItem}.
@param comparison: implementor of L{iaxiom.IComparison}.
@param default: value to use if the item is not found.
"""
results = list(self.query(tableClass, comparison, limit=2))
lr = len(results)
if lr == 0:
if default is _noItem:
raise errors.ItemNotFound(comparison)
else:
return default
elif lr == 2:
raise errors.DuplicateUniqueItem(comparison, results)
elif lr == 1:
return results[0]
else:
raise AssertionError("limit=2 database query returned 3+ results: ",
comparison, results)
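A usage sketch, assuming a hypothetical C{User} item type with a unique C{name} attribute:

user = store.findUnique(User, User.name == u'alice', default=None)
if user is None:
    user = User(store=store, name=u'alice')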
|
def findFirst(self, tableClass, comparison=None,
offset=None, sort=None, default=None):
"""
Usage::
s.findFirst(tableClass [, query arguments except 'limit'])
Example::
class YourItemType(Item):
a = integer()
b = text()
c = integer()
...
it = s.findFirst(YourItemType,
AND(YourItemType.a == 1,
YourItemType.b == u'2'),
sort=YourItemType.c.descending)
Search for an item with columns in the database that match the passed
comparison, offset and sort, returning the first match if one is found,
or the passed default (None if none is passed) if one is not found.
"""
limit = 1
for item in self.query(tableClass, comparison, limit, offset, sort):
return item
return default
|
def query(self, tableClass, comparison=None,
limit=None, offset=None, sort=None):
"""
Return a generator of instances of C{tableClass},
or tuples of instances if C{tableClass} is a
tuple of classes.
Examples::
fastCars = s.query(Vehicle,
axiom.attributes.AND(
Vehicle.wheels == 4,
Vehicle.maxKPH > 200),
limit=100,
sort=Vehicle.maxKPH.descending)
quotesByClient = s.query( (Client, Quote),
axiom.attributes.AND(
Client.active == True,
Quote.client == Client.storeID,
Quote.created >= someDate),
limit=10,
sort=(Client.name.ascending,
Quote.created.descending))
@param tableClass: a subclass of Item to look for instances of,
or a tuple of subclasses.
@param comparison: a provider of L{IComparison}, or None, to match
all items available in the store. If tableClass is a tuple, then
the comparison must refer to all Item subclasses in that tuple,
and specify the relationships between them.
@param limit: an int to limit the total length of the results, or None
for all available results.
@param offset: an int to specify a starting point within the available
results, or None to start at 0.
@param sort: an L{ISort}, something that comes from an SQLAttribute's
'ascending' or 'descending' attribute.
@return: an L{ItemQuery} object, which is an iterable of Items or
tuples of Items, according to tableClass.
"""
if isinstance(tableClass, tuple):
queryClass = MultipleItemQuery
else:
queryClass = ItemQuery
return queryClass(self, tableClass, comparison, limit, offset, sort)
|
Create multiple items in the store without loading
corresponding Python objects into memory.
The items' C{stored} callback will not be called.
Example::
myData = [(37, u"Fred", u"Wichita"),
(28, u"Jim", u"Fresno"),
(43, u"Betty", u"Dubuque")]
myStore.batchInsert(FooItem,
[FooItem.age, FooItem.name, FooItem.city],
myData)
@param itemType: an Item subclass to create instances of.
@param itemAttributes: an iterable of attributes on the Item subclass.
@param dataRows: an iterable of iterables, each the same
length as C{itemAttributes} and containing data corresponding
to each attribute in it.
@return: None.
def batchInsert(self, itemType, itemAttributes, dataRows):
"""
Create multiple items in the store without loading
corresponding Python objects into memory.
The items' C{stored} callback will not be called.
Example::
myData = [(37, u"Fred", u"Wichita"),
(28, u"Jim", u"Fresno"),
(43, u"Betty", u"Dubuque")]
myStore.batchInsert(FooItem,
[FooItem.age, FooItem.name, FooItem.city],
myData)
@param itemType: an Item subclass to create instances of.
@param itemAttributes: an iterable of attributes on the Item subclass.
@param dataRows: an iterable of iterables, each the same
length as C{itemAttributes} and containing data corresponding
to each attribute in it.
@return: None.
"""
class FakeItem:
pass
_NEEDS_DEFAULT = object() # token for lookup failure
fakeOSelf = FakeItem()
fakeOSelf.store = self
sql = itemType._baseInsertSQL(self)
indices = {}
schema = [attr for (name, attr) in itemType.getSchema()]
for i, attr in enumerate(itemAttributes):
indices[attr] = i
for row in dataRows:
oid = self.store.executeSchemaSQL(
_schema.CREATE_OBJECT, [self.store.getTypeID(itemType)])
insertArgs = [oid]
for attr in schema:
i = indices.get(attr, _NEEDS_DEFAULT)
if i is _NEEDS_DEFAULT:
pyval = attr.default
else:
pyval = row[i]
dbval = attr._convertPyval(fakeOSelf, pyval)
insertArgs.append(dbval)
self.executeSQL(sql, insertArgs)
|
An item in this store was changed. Add it to the current transaction's
list of changed items, if a transaction is currently underway, or raise
an exception if this L{Store} is currently in a state which does not
allow changes.
def changed(self, item):
"""
An item in this store was changed. Add it to the current transaction's
list of changed items, if a transaction is currently underway, or raise
an exception if this L{Store} is currently in a state which does not
allow changes.
"""
if self._rejectChanges:
raise errors.ChangeRejected()
if self.transaction is not None:
self.transaction.add(item)
self.touched.add(item)
|
Execute C{f(*a, **k)} in the context of a database transaction.
Any changes made to this L{Store} by C{f} will be committed when C{f}
returns. If C{f} raises an exception, those changes will be reverted
instead.
If a transaction is already in progress (in this thread - ie, if a
frame executing L{Store.transact} is already on the call stack), this
will B{not} start a nested transaction. Changes will not be committed
until the existing transaction completes, and an exception raised by
C{f} will not revert changes made by C{f}. You probably don't want to
ever call this if another transaction is in progress.
@return: Whatever C{f(*a, **kw)} returns.
@raise: Whatever C{f(*a, **kw)} raises, or a database exception.
def transact(self, f, *a, **k):
"""
Execute C{f(*a, **k)} in the context of a database transaction.
Any changes made to this L{Store} by C{f} will be committed when C{f}
returns. If C{f} raises an exception, those changes will be reverted
instead.
If a transaction is already in progress (in this thread - ie, if a
frame executing L{Store.transact} is already on the call stack), this
will B{not} start a nested transaction. Changes will not be committed
until the existing transaction completes, and an exception raised by
C{f} will not revert changes made by C{f}. You probably don't want to
ever call this if another transaction is in progress.
@return: Whatever C{f(*a, **kw)} returns.
@raise: Whatever C{f(*a, **kw)} raises, or a database exception.
"""
if self.transaction is not None:
return f(*a, **k)
if self.attachedToParent:
return self.parent.transact(f, *a, **k)
try:
self._begin()
try:
result = f(*a, **k)
self.checkpoint()
except:
exc = Failure()
try:
self.revert()
except:
log.err(exc)
raise
raise
else:
self._commit()
return result
finally:
self._cleanupTxnState()
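A minimal usage sketch (the item class and its attributes are illustrative, not from the source):

    def renameClient(client, newName):
        # Both writes commit together; if an exception escapes,
        # both are reverted.
        client.name = newName
        client.active = True
        return client.name

    s.transact(renameClient, client, u'Acme Ltd')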
|
Return the unqualified (ie, no database name) name of the index covering
the given attributes of the given table.
@type tableClass: L{MetaItem}
@param tableClass: The Python class associated with a table in the
database.
@param attrname: A sequence of the names of the columns of the
indicated table which will be included in the named index.
@return: A C{str} giving the name of the index which will index the
given attributes of the given table.
def _indexNameOf(self, tableClass, attrname):
"""
Return the unqualified (ie, no database name) name of the index covering
the given attributes of the given table.
@type tableClass: L{MetaItem}
@param tableClass: The Python class associated with a table in the
database.
@param attrname: A sequence of the names of the columns of the
indicated table which will be included in the named index.
@return: A C{str} giving the name of the index which will index the
given attributes of the given table.
"""
return "axiomidx_%s_v%d_%s" % (tableClass.typeName,
tableClass.schemaVersion,
'_'.join(attrname))
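For illustration, given a hypothetical Vehicle item class with typeName 'vehicle' and schemaVersion 1:

    store._indexNameOf(Vehicle, ('wheels', 'maxKPH'))
    # -> 'axiomidx_vehicle_v1_wheels_maxKPH'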
|
Retrieve the fully qualified name of the table holding items
of a particular class in this store. If the table does not
exist in the database, it will be created as a side-effect.
@param tableClass: an Item subclass
@raises axiom.errors.ItemClassesOnly: if an object other than a
subclass of Item is passed.
@return: a string
def getTableName(self, tableClass):
"""
Retrieve the fully qualified name of the table holding items
of a particular class in this store. If the table does not
exist in the database, it will be created as a side-effect.
@param tableClass: an Item subclass
@raises axiom.errors.ItemClassesOnly: if an object other than a
subclass of Item is passed.
@return: a string
"""
if not (isinstance(tableClass, type) and issubclass(tableClass, item.Item)):
raise errors.ItemClassesOnly("Only subclasses of Item have table names.")
if tableClass not in self.typeToTableNameCache:
self.typeToTableNameCache[tableClass] = self._tableNameFor(tableClass.typeName, tableClass.schemaVersion)
# make sure the table exists
self.getTypeID(tableClass)
return self.typeToTableNameCache[tableClass]
|
Retrieve the fully qualified column name for a particular
attribute in this store. The attribute must be bound to an
Item subclass (its type must be valid). If the underlying
table does not exist in the database, it will be created as a
side-effect.
@param attribute: an attribute bound to an Item subclass
@return: a string
def getColumnName(self, attribute):
"""
Retrieve the fully qualified column name for a particular
attribute in this store. The attribute must be bound to an
Item subclass (its type must be valid). If the underlying
table does not exist in the database, it will be created as a
side-effect.
@param attribute: an attribute bound to an Item subclass
@return: a string
"""
if attribute not in self.attrToColumnNameCache:
self.attrToColumnNameCache[attribute] = '.'.join(
(self.getTableName(attribute.type),
self.getShortColumnName(attribute)))
return self.attrToColumnNameCache[attribute]
|
Retrieve the typeID associated with a particular table in the
in-database schema for this Store. A typeID is an opaque integer
representing the Item subclass, and the associated table in this
Store's SQLite database.
@param tableClass: a subclass of Item
@return: an integer
def getTypeID(self, tableClass):
"""
Retrieve the typeID associated with a particular table in the
in-database schema for this Store. A typeID is an opaque integer
representing the Item subclass, and the associated table in this
Store's SQLite database.
@param tableClass: a subclass of Item
@return: an integer
"""
key = (tableClass.typeName,
tableClass.schemaVersion)
if key in self.typenameAndVersionToID:
return self.typenameAndVersionToID[key]
return self.transact(self._maybeCreateTable, tableClass, key)
|
Execute the table creation DDL for an Item subclass.
Indexes are *not* created.
@type tableClass: type
@param tableClass: an Item subclass
def _justCreateTable(self, tableClass):
"""
Execute the table creation DDL for an Item subclass.
Indexes are *not* created.
@type tableClass: type
@param tableClass: an Item subclass
"""
sqlstr = []
sqlarg = []
# needs to be calculated including version
tableName = self._tableNameFor(tableClass.typeName,
tableClass.schemaVersion)
sqlstr.append("CREATE TABLE %s (" % tableName)
# The column is named "oid" instead of "storeID" for backwards
# compatibility with the implicit oid/rowid column in old Stores.
sqlarg.append("oid INTEGER PRIMARY KEY")
for nam, atr in tableClass.getSchema():
sqlarg.append("\n%s %s" %
(atr.getShortColumnName(self), atr.sqltype))
sqlstr.append(', '.join(sqlarg))
sqlstr.append(')')
self.createSQL(''.join(sqlstr))
|
A type ID has been requested for an Item subclass whose table was not
present when this Store was opened. Attempt to create the table, and
if that fails because another Store object (perhaps in another process)
has created the table, re-read the schema. When that's done, return
the typeID.
This method is internal to the implementation of getTypeID. It must be
run in a transaction.
@param tableClass: an Item subclass
@param key: a 2-tuple of the tableClass's typeName and schemaVersion
@return: a typeID for the table; a new one if no table exists, or the
existing one if the table was created by another Store object
referencing this database.
def _maybeCreateTable(self, tableClass, key):
"""
A type ID has been requested for an Item subclass whose table was not
present when this Store was opened. Attempt to create the table, and
if that fails because another Store object (perhaps in another process)
has created the table, re-read the schema. When that's done, return
the typeID.
This method is internal to the implementation of getTypeID. It must be
run in a transaction.
@param tableClass: an Item subclass
@param key: a 2-tuple of the tableClass's typeName and schemaVersion
@return: a typeID for the table; a new one if no table exists, or the
existing one if the table was created by another Store object
referencing this database.
"""
try:
self._justCreateTable(tableClass)
except errors.TableAlreadyExists:
# Although we don't have a memory of this table from the last time
# we called "_startup()", another process has updated the schema
# since then.
self._startup()
return self.typenameAndVersionToID[key]
typeID = self.executeSchemaSQL(_schema.CREATE_TYPE,
[tableClass.typeName,
tableClass.__module__,
tableClass.schemaVersion])
self.typenameAndVersionToID[key] = typeID
if self.tablesCreatedThisTransaction is not None:
self.tablesCreatedThisTransaction.append(tableClass)
# If the new type is a legacy type (not the current version), we need
# to queue it for upgrade to ensure that if we are in the middle of an
# upgrade, legacy items of this version get upgraded.
cls = _typeNameToMostRecentClass.get(tableClass.typeName)
if cls is not None and tableClass.schemaVersion != cls.schemaVersion:
self._upgradeManager.queueTypeUpgrade(tableClass)
# We can pass () for extantIndexes here because since the table didn't
# exist for tableClass, none of its indexes could have either.
# Whatever checks _createIndexesFor will make would give the same
# result against the actual set of existing indexes as they will
# against ().
self._createIndexesFor(tableClass, ())
for n, (name, storedAttribute) in enumerate(tableClass.getSchema()):
self.executeSchemaSQL(
_schema.ADD_SCHEMA_ATTRIBUTE,
[typeID, n, storedAttribute.indexed, storedAttribute.sqltype,
storedAttribute.allowNone, storedAttribute.attrname,
storedAttribute.doc, storedAttribute.__class__.__name__])
# XXX probably need something better for pythontype eventually,
# when we figure out a good way to do user-defined attributes or we
# start parameterizing references.
return typeID
|
Create any indexes which don't exist and are required by the schema
defined by C{tableClass}.
@param tableClass: A L{MetaItem} instance which may define a schema
which includes indexes.
@param extantIndexes: A container (anything which can be the right-hand
argument to the C{in} operator) which contains the unqualified
names of all indexes which already exist in the underlying database
and do not need to be created.
def _createIndexesFor(self, tableClass, extantIndexes):
"""
Create any indexes which don't exist and are required by the schema
defined by C{tableClass}.
@param tableClass: A L{MetaItem} instance which may define a schema
which includes indexes.
@param extantIndexes: A container (anything which can be the right-hand
argument to the C{in} operator) which contains the unqualified
names of all indexes which already exist in the underlying database
and do not need to be created.
"""
try:
indexes = _requiredTableIndexes[tableClass]
except KeyError:
indexes = set()
for nam, atr in tableClass.getSchema():
if atr.indexed:
indexes.add(((atr.getShortColumnName(self),), (atr.attrname,)))
for compound in atr.compoundIndexes:
indexes.add((tuple(inatr.getShortColumnName(self) for inatr in compound),
tuple(inatr.attrname for inatr in compound)))
_requiredTableIndexes[tableClass] = indexes
# _ZOMFG_ SQL is such a piece of _shit_: you can't fully qualify the
# table name in CREATE INDEX statements because the _INDEX_ is fully
# qualified!
indexColumnPrefix = '.'.join(self.getTableName(tableClass).split(".")[1:])
for (indexColumns, indexAttrs) in indexes:
nameOfIndex = self._indexNameOf(tableClass, indexAttrs)
if nameOfIndex in extantIndexes:
continue
csql = 'CREATE INDEX %s.%s ON %s(%s)' % (
self.databaseName, nameOfIndex, indexColumnPrefix,
', '.join(indexColumns))
self.createSQL(csql)
|
Retrieve an item by its storeID, and return it.
Note: most of the failure modes of this method are catastrophic and
should not be handled by application code. The only one that
application programmers should be concerned with is
L{errors.ItemNotFound}. They are listed for educational purposes.
@param storeID: an L{int} which identifies an item in this store.
@param default: if passed, return this value rather than raising in the
case where no Item is found.
@param autoUpgrade: if true (the default), legacy items are upgraded to
the most recent schema version before being returned.
@raise TypeError: if storeID is not an integer.
@raise UnknownItemType: if the storeID refers to an item row in the
database, but the corresponding type information is not available to
Python.
@raise RuntimeError: if the found item's class version is higher than
the current application is aware of. (In other words, if you have
upgraded a database to a new schema and then attempt to open it with a
previous version of the code.)
@raise errors.ItemNotFound: if no item existed with the given storeID.
@return: an Item, or the given default, if it was passed and no row
corresponding to the given storeID can be located in the database.
def getItemByID(self, storeID, default=_noItem, autoUpgrade=True):
"""
Retrieve an item by its storeID, and return it.
Note: most of the failure modes of this method are catastrophic and
should not be handled by application code. The only one that
application programmers should be concerned with is
L{errors.ItemNotFound}. They are listed for educational purposes.
@param storeID: an L{int} which identifies an item in this store.
@param default: if passed, return this value rather than raising in the
case where no Item is found.
@param autoUpgrade: if true (the default), legacy items are upgraded to
the most recent schema version before being returned.
@raise TypeError: if storeID is not an integer.
@raise UnknownItemType: if the storeID refers to an item row in the
database, but the corresponding type information is not available to
Python.
@raise RuntimeError: if the found item's class version is higher than
the current application is aware of. (In other words, if you have
upgraded a database to a new schema and then attempt to open it with a
previous version of the code.)
@raise errors.ItemNotFound: if no item existed with the given storeID.
@return: an Item, or the given default, if it was passed and no row
corresponding to the given storeID can be located in the database.
"""
if not isinstance(storeID, (int, long)):
raise TypeError("storeID *must* be an int or long, not %r" % (
type(storeID).__name__,))
if storeID == STORE_SELF_ID:
return self
try:
return self.objectCache.get(storeID)
except KeyError:
pass
log.msg(interface=iaxiom.IStatEvent, stat_cache_misses=1, key=storeID)
results = self.querySchemaSQL(_schema.TYPEOF_QUERY, [storeID])
assert (len(results) in [1, 0]),\
"Database panic: more than one result for TYPEOF!"
if results:
typename, module, version = results[0]
useMostRecent = False
moreRecentAvailable = False
# The schema may have changed since the last time I saw the
# database. Let's look to see if this is suspiciously broken...
if _typeIsTotallyUnknown(typename, version):
# Another process may have created it - let's re-up the schema
# and see what we get.
self._startup()
# OK, all the modules have been loaded now, everything
# verified.
if _typeIsTotallyUnknown(typename, version):
# If there is STILL no inkling of it anywhere, we are
# almost certainly boned. Let's tell the user in a
# structured way, at least.
raise errors.UnknownItemType(
"cannot load unknown schema/version pair: %r %r - id: %r" %
(typename, version, storeID))
if typename in _typeNameToMostRecentClass:
moreRecentAvailable = True
mostRecent = _typeNameToMostRecentClass[typename]
if mostRecent.schemaVersion < version:
raise RuntimeError("%s:%d - was found in the database and most recent %s is %d" %
(typename, version, typename, mostRecent.schemaVersion))
if mostRecent.schemaVersion == version:
useMostRecent = True
if useMostRecent:
T = mostRecent
else:
T = self.getOldVersionOf(typename, version)
# for the moment we're going to assume no inheritance
attrs = self.querySQL(T._baseSelectSQL(self), [storeID])
if len(attrs) == 0:
if default is _noItem:
raise errors.ItemNotFound(
'No results for known-to-be-good object')
return default
elif len(attrs) > 1:
raise errors.DataIntegrityError(
'Too many results for {:d}'.format(storeID))
attrs = attrs[0]
x = T.existingInStore(self, storeID, attrs)
if moreRecentAvailable and (not useMostRecent) and autoUpgrade:
# upgradeVersion will do caching as necessary, we don't have to
# cache here. (It must, so that app code can safely call
# upgradeVersion and get a consistent object out of it.)
x = self.transact(self._upgradeManager.upgradeItem, x)
elif not x.__legacy__:
# We loaded the most recent version of an object
self.objectCache.cache(storeID, x)
return x
if default is _noItem:
raise errors.ItemNotFound(storeID)
return default
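A minimal usage sketch:

    item = s.getItemByID(someStoreID, default=None)
    if item is None:
        pass  # no such row; without a default this raises errors.ItemNotFound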
|
For use with SELECT (or SELECT-like PRAGMA) statements.
def querySQL(self, sql, args=()):
"""For use with SELECT (or SELECT-like PRAGMA) statements.
"""
if self.debug:
result = timeinto(self.queryTimes, self._queryandfetch, sql, args)
else:
result = self._queryandfetch(sql, args)
return result
|
For use with auto-committing statements such as CREATE TABLE or CREATE
INDEX.
def createSQL(self, sql, args=()):
"""
For use with auto-committing statements such as CREATE TABLE or CREATE
INDEX.
"""
before = time.time()
self._execSQL(sql, args)
after = time.time()
if after - before > 2.0:
log.msg('Extremely long CREATE: %s' % (after - before,))
log.msg(sql)
|
For use with UPDATE or INSERT statements.
def executeSQL(self, sql, args=()):
"""
For use with UPDATE or INSERT statements.
"""
sql = self._execSQL(sql, args)
result = self.cursor.lastRowID()
if self.executedThisTransaction is not None:
self.executedThisTransaction.append((result, sql, args))
return result
|
Given the filename of a file containing a __counter__ variable,
open it, read the count, add one, and rewrite the file.
This:
__counter__=123
Becomes:
__counter__=124
def updateVersion(fname):
"""
Given the filename of a file containing a __counter__ variable,
open it, read the count, add one, and rewrite the file.
This:
__counter__=123
Becomes:
__counter__=124
"""
    fname = os.path.abspath(fname)
    if not os.path.exists(fname):
        print("cannot update version! file doesn't exist:\n", fname)
        return
    with open(fname) as f:
        raw = f.read().split("\n")
    counter = None
    for i, line in enumerate(raw):
        if line.startswith("__counter__="):
            counter = int(line.split("=")[1])
            raw[i] = "__counter__=%d" % (counter + 1)
    if counter is None:
        # Without this guard, a file lacking a __counter__ line would
        # raise a NameError below.
        print("cannot update version! no __counter__ line found in:\n", fname)
        return
    with open(fname, 'w') as f:
        f.write("\n".join(raw))
    print("upgraded version %d -> %d" % (counter, counter + 1))
    # import the freshly rewritten module to report the new version string
    sys.path.insert(0, os.path.dirname(fname))
    import version
    print("New version:", version.__version__)
    with open('version.txt', 'w') as f:
        f.write(str(version.__version__))
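For illustration, a version.py file in the shape this function expects (only the __counter__ line is required by the code above; the __version__ line is an assumption based on the later import):

    __counter__=124
    __version__ = "1.0.%d" % __counter__  # assumed derivation, not from the source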
|
Add the scheduler attribute to the given L{_SubSchedulerParentHook}.
def upgradeParentHook1to2(oldHook):
"""
Add the scheduler attribute to the given L{_SubSchedulerParentHook}.
"""
newHook = oldHook.upgradeVersion(
oldHook.typeName, 1, 2,
loginAccount=oldHook.loginAccount,
scheduledAt=oldHook.scheduledAt,
scheduler=oldHook.store.findFirst(Scheduler))
return newHook
|
Copy C{loginAccount} to C{subStore} and remove the installation marker.
def upgradeParentHook3to4(old):
"""
Copy C{loginAccount} to C{subStore} and remove the installation marker.
"""
new = old.upgradeVersion(
old.typeName, 3, 4, subStore=old.loginAccount)
uninstallFrom(new, new.store)
return new
|
Run my runnable, and reschedule or delete myself based on its result.
Must be run in a transaction.
def invokeRunnable(self):
"""
Run my runnable, and reschedule or delete myself based on its result.
Must be run in a transaction.
"""
runnable = self.runnable
if runnable is None:
self.deleteFromStore()
else:
try:
self.running = True
newTime = runnable.run()
finally:
self.running = False
self._rescheduleFromRun(newTime)
|
An error occurred running my runnable. Check my runnable for an
error-handling method called 'timedEventErrorHandler' that will take
the given failure as an argument, and execute that if available:
otherwise, create a TimedEventFailureLog with information about what
happened to this event.
Must be run in a transaction.
def handleError(self, now, failureObj):
""" An error occurred running my runnable. Check my runnable for an
error-handling method called 'timedEventErrorHandler' that will take
the given failure as an argument, and execute that if available:
otherwise, create a TimedEventFailureLog with information about what
happened to this event.
Must be run in a transaction.
"""
errorHandler = getattr(self.runnable, 'timedEventErrorHandler', None)
if errorHandler is not None:
self._rescheduleFromRun(errorHandler(self, failureObj))
else:
self._defaultErrorHandler(now, failureObj)
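A minimal sketch of the hook being probed for, defined on the runnable item (the one-hour retry policy and the imports are assumptions):

    from datetime import timedelta
    from epsilon.extime import Time
    from twisted.python import log

    def timedEventErrorHandler(self, timedEvent, failureObj):
        # Log the failure and ask to run again in an hour; the return
        # value is fed to _rescheduleFromRun just like run()'s result.
        log.err(failureObj)
        return Time() + timedelta(hours=1)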
|
Remove the given item from the schedule.
If runnable is scheduled to run multiple times, only the temporally first
is removed.
def unscheduleFirst(self, runnable):
"""
Remove the given item from the schedule.
If runnable is scheduled to run multiple times, only the temporally first
is removed.
"""
for evt in self.store.query(TimedEvent, TimedEvent.runnable == runnable, sort=TimedEvent.time.ascending):
evt.deleteFromStore()
break
|
Return an iterable of the times at which the given item is scheduled to
run.
def scheduledTimes(self, runnable):
"""
Return an iterable of the times at which the given item is scheduled to
run.
"""
events = self.store.query(
TimedEvent, TimedEvent.runnable == runnable)
return (event.time for event in events if not event.running)
|
Start calling persistent timed events whose time has come.
def startService(self):
"""
Start calling persistent timed events whose time has come.
"""
super(_SiteScheduler, self).startService()
self._transientSchedule(self.now(), self.now())
|
Stop calling persistent timed events.
def stopService(self):
"""
Stop calling persistent timed events.
"""
super(_SiteScheduler, self).stopService()
if self.timer is not None:
self.timer.cancel()
self.timer = None
|
If the service is currently running, schedule a tick to happen no
later than C{when}.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: The current time.
@type now: L{epsilon.extime.Time}
def _transientSchedule(self, when, now):
"""
If the service is currently running, schedule a tick to happen no
later than C{when}.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: The current time.
@type now: L{epsilon.extime.Time}
"""
if not self.running:
return
if self.timer is not None:
if self.timer.getTime() < when.asPOSIXTimestamp():
return
self.timer.cancel()
delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()
# reactor.callLater allows only positive delay values. The scheduler
# may want to have scheduled things in the past and that's OK, since we
# are dealing with Time() instances it's impossible to predict what
# they are relative to the current time from user code anyway.
delay = max(_EPSILON, delay)
self.timer = self.callLater(delay, self.tick)
self.nextEventAt = when
|
If this service's store is attached to its parent, ask the parent to
schedule this substore to tick at the given time.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: Present for signature compatibility with
L{_SiteScheduler._transientSchedule}, but ignored otherwise.
def _transientSchedule(self, when, now):
"""
If this service's store is attached to its parent, ask the parent to
schedule this substore to tick at the given time.
@param when: The time at which to tick.
@type when: L{epsilon.extime.Time}
@param now: Present for signature compatibility with
L{_SiteScheduler._transientSchedule}, but ignored otherwise.
"""
if self.store.parent is not None:
subStore = self.store.parent.getItemByID(self.store.idInParent)
hook = self.store.parent.findOrCreate(
_SubSchedulerParentHook,
subStore=subStore)
hook._schedule(when)
|
Remove the components in the site store for this SubScheduler.
def migrateDown(self):
"""
Remove the components in the site store for this SubScheduler.
"""
subStore = self.store.parent.getItemByID(self.store.idInParent)
ssph = self.store.parent.findUnique(
_SubSchedulerParentHook,
_SubSchedulerParentHook.subStore == subStore,
default=None)
if ssph is not None:
te = self.store.parent.findUnique(TimedEvent,
TimedEvent.runnable == ssph,
default=None)
if te is not None:
te.deleteFromStore()
ssph.deleteFromStore()
|
Recreate the hooks in the site store to trigger this SubScheduler.
def migrateUp(self):
"""
Recreate the hooks in the site store to trigger this SubScheduler.
"""
te = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending)
if te is not None:
self._transientSchedule(te.time, None)
|
Whenever L{Scheduler} or L{SubScheduler} is created, either newly or
when loaded from a database, emit a deprecation warning referring
people to L{IScheduler}.
def activate(self):
"""
Whenever L{Scheduler} or L{SubScheduler} is created, either newly or
when loaded from a database, emit a deprecation warning referring
people to L{IScheduler}.
"""
# This is unfortunate. Perhaps it is the best thing which works (it is
# the first I found). -exarkun
if '_axiom_memory_dummy' in vars(self):
stacklevel = 7
else:
stacklevel = 5
warnings.warn(
self.__class__.__name__ + " is deprecated since Axiom 0.5.32. "
"Just adapt stores to IScheduler.",
category=PendingDeprecationWarning,
stacklevel=stacklevel)
|
Ensure that this hook is scheduled to run at or before C{when}.
def _schedule(self, when):
"""
Ensure that this hook is scheduled to run at or before C{when}.
"""
sched = IScheduler(self.store)
for scheduledAt in sched.scheduledTimes(self):
if when < scheduledAt:
sched.reschedule(self, scheduledAt, when)
break
else:
sched.schedule(self, when)
|
Tokenize a text, index a term matrix, and build out a graph.
Args:
path (str): The file path.
term_depth (int): Consider the N most frequent terms.
skim_depth (int): Connect each word to the N closest siblings.
d_weights (bool): If true, give "close" nodes low weights.
Returns:
Skimmer: The indexed graph.
def build_graph(path, term_depth=1000, skim_depth=10,
d_weights=False, **kwargs):
"""
Tokenize a text, index a term matrix, and build out a graph.
Args:
path (str): The file path.
term_depth (int): Consider the N most frequent terms.
skim_depth (int): Connect each word to the N closest siblings.
d_weights (bool): If true, give "close" nodes low weights.
Returns:
Skimmer: The indexed graph.
"""
# Tokenize text.
click.echo('\nTokenizing text...')
t = Text.from_file(path)
click.echo('Extracted %d tokens' % len(t.tokens))
m = Matrix()
# Index the term matrix.
click.echo('\nIndexing terms:')
m.index(t, t.most_frequent_terms(term_depth), **kwargs)
g = Skimmer()
# Construct the network.
click.echo('\nGenerating graph:')
g.build(t, m, skim_depth, d_weights)
return g
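A hedged usage sketch (the file path is illustrative); rendering uses the draw_spring method shown below:

    g = build_graph('corpus/war-and-peace.txt', term_depth=500, skim_depth=5)
    g.draw_spring()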
|
Render a spring layout.
def draw_spring(self, **kwargs):
"""
Render a spring layout.
"""
nx.draw_spring(
self.graph,
with_labels=True,
font_size=10,
edge_color='#dddddd',
node_size=0,
**kwargs
)
plt.show()
|
1. For each term in the passed matrix, score its KDE similarity with
all other indexed terms.
2. With the ordered stack of similarities in hand, skim off the top X
pairs and add them as edges.
Args:
text (Text): The source text instance.
matrix (Matrix): An indexed term matrix.
skim_depth (int): The number of siblings for each term.
d_weights (bool): If true, give "close" words low edge weights.
def build(self, text, matrix, skim_depth=10, d_weights=False):
"""
1. For each term in the passed matrix, score its KDE similarity with
all other indexed terms.
2. With the ordered stack of similarities in hand, skim off the top X
pairs and add them as edges.
Args:
text (Text): The source text instance.
matrix (Matrix): An indexed term matrix.
skim_depth (int): The number of siblings for each term.
d_weights (bool): If true, give "close" words low edge weights.
"""
for anchor in bar(matrix.keys):
n1 = text.unstem(anchor)
# Heaviest pair scores:
pairs = matrix.anchored_pairs(anchor).items()
for term, weight in list(pairs)[:skim_depth]:
# If edges represent distance, use the complement of the raw
# score, so that similar words are connected by "short" edges.
if d_weights: weight = 1-weight
n2 = text.unstem(term)
# NetworkX does not handle numpy types when writing graphml,
# so we cast the weight to a regular float.
self.graph.add_edge(n1, n2, weight=float(weight))
|
Gets a named section from the configuration source.
:param section: a :class:`str` representing the section you want to
retrieve from the configuration source. If ``None`` this will
fallback to the :attr:`plaster.PlasterURL.fragment`.
:param defaults: a :class:`dict` that will get passed to
:class:`configparser.ConfigParser` and will populate the
``DEFAULT`` section.
:return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.
def get_settings(self, section=None, defaults=None):
"""
Gets a named section from the configuration source.
:param section: a :class:`str` representing the section you want to
retrieve from the configuration source. If ``None`` this will
fallback to the :attr:`plaster.PlasterURL.fragment`.
:param defaults: a :class:`dict` that will get passed to
:class:`configparser.ConfigParser` and will populate the
``DEFAULT`` section.
:return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.
"""
# This is a partial reimplementation of
# ``paste.deploy.loadwsgi.ConfigLoader:get_context`` which supports
# "set" and "get" options and filters out any other globals
section = self._maybe_get_default_name(section)
if self.filepath is None:
return {}
parser = self._get_parser(defaults)
defaults = parser.defaults()
try:
raw_items = parser.items(section)
except NoSectionError:
return {}
local_conf = OrderedDict()
get_from_globals = {}
for option, value in raw_items:
if option.startswith("set "):
name = option[4:].strip()
defaults[name] = value
elif option.startswith("get "):
name = option[4:].strip()
get_from_globals[name] = value
# insert a value into local_conf to preserve the order
local_conf[name] = None
else:
# annoyingly pastedeploy filters out all defaults unless
# "get foo" is used to pull it in
if option in defaults:
continue
local_conf[option] = value
for option, global_option in get_from_globals.items():
local_conf[option] = defaults[global_option]
return ConfigDict(local_conf, defaults, self)
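For illustration, a config fragment exercising the "set"/"get" handling above (all names invented):

    [DEFAULT]
    http_port = 8080

    [app:main]
    use = egg:MyApp
    set debug = false
    get port = http_port

Here ``debug`` is written into the defaults, ``port`` pulls ``http_port`` in from the globals, and any other DEFAULT value stays filtered out of the returned ConfigDict.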
|
Reads the configuration source and finds and loads a WSGI
application defined by the entry with name ``name`` per the
PasteDeploy configuration format and loading mechanism.
:param name: The named WSGI app to find, load and return. Defaults to
``None`` which becomes ``main`` inside
:func:`paste.deploy.loadapp`.
:param defaults: The ``global_conf`` that will be used during app
instantiation.
:return: A WSGI application.
def get_wsgi_app(self, name=None, defaults=None):
"""
Reads the configuration source and finds and loads a WSGI
application defined by the entry with name ``name`` per the
PasteDeploy configuration format and loading mechanism.
:param name: The named WSGI app to find, load and return. Defaults to
``None`` which becomes ``main`` inside
:func:`paste.deploy.loadapp`.
:param defaults: The ``global_conf`` that will be used during app
instantiation.
:return: A WSGI application.
"""
name = self._maybe_get_default_name(name)
defaults = self._get_defaults(defaults)
return loadapp(
self.pastedeploy_spec,
name=name,
relative_to=self.relative_to,
global_conf=defaults,
)
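A minimal usage sketch via the plaster entry point (the URI is illustrative):

    import plaster

    loader = plaster.get_loader('development.ini#main', protocols=['wsgi'])
    app = loader.get_wsgi_app()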
|
Reads the configuration source and finds and loads a WSGI server
defined by the server entry with the name ``name`` per the PasteDeploy
configuration format and loading mechanism.
:param name: The named WSGI server to find, load and return. Defaults
to ``None`` which becomes ``main`` inside
:func:`paste.deploy.loadserver`.
:param defaults: The ``global_conf`` that will be used during server
instantiation.
:return: A WSGI server runner callable which accepts a WSGI app.
def get_wsgi_server(self, name=None, defaults=None):
"""
Reads the configuration source and finds and loads a WSGI server
defined by the server entry with the name ``name`` per the PasteDeploy
configuration format and loading mechanism.
:param name: The named WSGI server to find, load and return. Defaults
to ``None`` which becomes ``main`` inside
:func:`paste.deploy.loadserver`.
:param defaults: The ``global_conf`` that will be used during server
instantiation.
:return: A WSGI server runner callable which accepts a WSGI app.
"""
name = self._maybe_get_default_name(name)
defaults = self._get_defaults(defaults)
return loadserver(
self.pastedeploy_spec,
name=name,
relative_to=self.relative_to,
global_conf=defaults,
)
|
Reads the configuration source and finds and loads a WSGI filter
defined by the filter entry with the name ``name`` per the PasteDeploy
configuration format and loading mechanism.
:param name: The named WSGI filter to find, load and return. Defaults
to ``None`` which becomes ``main`` inside
:func:`paste.deploy.loadfilter`.
:param defaults: The ``global_conf`` that will be used during filter
instantiation.
:return: A callable that can filter a WSGI application.
def get_wsgi_filter(self, name=None, defaults=None):
"""Reads the configuration soruce and finds and loads a WSGI filter
defined by the filter entry with the name ``name`` per the PasteDeploy
configuration format and loading mechanism.
:param name: The named WSGI filter to find, load and return. Defaults
to ``None`` which becomes ``main`` inside
:func:`paste.deploy.loadfilter`.
:param defaults: The ``global_conf`` that will be used during filter
instantiation.
:return: A callable that can filter a WSGI application.
"""
name = self._maybe_get_default_name(name)
defaults = self._get_defaults(defaults)
return loadfilter(
self.pastedeploy_spec,
name=name,
relative_to=self.relative_to,
global_conf=defaults,
)
|
Return an :class:`collections.OrderedDict` representing the
application config for a WSGI application named ``name`` in the
PasteDeploy config file specified by ``self.uri``.
``defaults``, if passed, should be a dictionary used as variable
assignments like ``{'http_port': 8080}``. This is useful if e.g.
``%(http_port)s`` is used in the config file.
If the ``name`` is None, this will attempt to parse the name from
the ``config_uri`` string expecting the format ``inifile#name``.
If no name is found, the name will default to "main".
:param name: The named WSGI app for which to find the settings.
Defaults to ``None`` which becomes ``main``
inside :func:`paste.deploy.loadapp`.
:param defaults: The ``global_conf`` that will be used during settings
generation.
:return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.
def get_wsgi_app_settings(self, name=None, defaults=None):
"""
Return an :class:`collections.OrderedDict` representing the
application config for a WSGI application named ``name`` in the
PasteDeploy config file specified by ``self.uri``.
``defaults``, if passed, should be a dictionary used as variable
assignments like ``{'http_port': 8080}``. This is useful if e.g.
``%(http_port)s`` is used in the config file.
If the ``name`` is None, this will attempt to parse the name from
the ``config_uri`` string expecting the format ``inifile#name``.
If no name is found, the name will default to "main".
:param name: The named WSGI app for which to find the settings.
Defaults to ``None`` which becomes ``main``
inside :func:`paste.deploy.loadapp`.
:param defaults: The ``global_conf`` that will be used during settings
generation.
:return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.
"""
name = self._maybe_get_default_name(name)
defaults = self._get_defaults(defaults)
conf = appconfig(
self.pastedeploy_spec,
name=name,
relative_to=self.relative_to,
global_conf=defaults,
)
return ConfigDict(conf.local_conf, conf.global_conf, self)
|