def id(self):
    """ Unique identifier of user object """
    return sa.Column(sa.Integer, primary_key=True, autoincrement=True)
def last_login_date(self):
    """ Date of user's last login """
    return sa.Column(
        sa.TIMESTAMP(timezone=False),
        default=lambda x: datetime.utcnow(),
        server_default=sa.func.now(),
    )
def security_code_date(self):
    """ Date of user's security code update """
    return sa.Column(
        sa.TIMESTAMP(timezone=False),
        default=datetime(2000, 1, 1),
        server_default="2000-01-01 01:01",
    )
def groups_dynamic(self):
    """ Returns dynamic relationship for groups - allowing for filtering of data """
    return sa.orm.relationship(
        "Group",
        secondary="users_groups",
        lazy="dynamic",
        passive_deletes=True,
        passive_updates=True,
    )
def resources(self):
    """
    Returns all resources directly owned by user, can be used to assign
    ownership of new resources::

        user.resources.append(resource)
    """
    return sa.orm.relationship(
        "Resource",
        cascade="all",
        passive_deletes=True,
        passive_updates=True,
        backref="owner",
        lazy="dynamic",
    )
def resources_dynamic(self):
    """
    Returns all resources directly owned by group, can be used to assign
    ownership of new resources::

        group.resources.append(resource)
    """
    return sa.orm.relationship(
        "Resource",
        cascade="all",
        passive_deletes=True,
        passive_updates=True,
        lazy="dynamic",
    )
def validate_permission(self, key, permission):
    """ Validates if group can get assigned with permission """
    if permission.perm_name not in self.__possible_permissions__:
        raise AssertionError(
            "perm_name is not one of {}".format(self.__possible_permissions__)
        )
    return permission
def parse_feeds(self, message_channel=True):
    """
    Iterates through each of the feed URLs, parses their items, and sends any
    items to the channel that have not previously been parsed.
    """
    if parse:
        for feed_url in self.feeds:
            feed = parse(feed_url)
            for item in feed.entries:
                if item["id"] not in self.feed_items:
                    self.feed_items.add(item["id"])
                    if message_channel:
                        message = self.format_item_message(feed, item)
                        self.message_channel(message)
    return
def ChunkBy(self: dict, f=None):
    """
    [
        {
            'self': [1, 1, 3, 3, 1, 1],
            'f': lambda x: x % 2,
            'assert': lambda ret: ret == [[1, 1], [3, 3], [1, 1]]
        }
    ]
    """
    if f is None:
        return _chunk(self.items())
    if is_to_destruct(f):
        f = destruct_func(f)
    return _chunk(self.items(), f)
def GroupBy(self: dict, f=None):
    """
    [
        {
            'self': [1, 2, 3],
            'f': lambda x: x % 2,
            'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3]
        }
    ]
    """
    if f and is_to_destruct(f):
        f = destruct_func(f)
    return _group_by(self.items(), f)
def Take(self: dict, n):
    """
    [
        {
            'self': [1, 2, 3],
            'n': 2,
            'assert': lambda ret: list(ret) == [1, 2]
        }
    ]
    """
    for i, e in enumerate(self.items()):
        if i == n:
            break
        yield e
def TakeIf(self: dict, f):
    """
    [
        {
            'self': [1, 2, 3],
            'f': lambda e: e % 2,
            'assert': lambda ret: list(ret) == [1, 3]
        }
    ]
    """
    if is_to_destruct(f):
        f = destruct_func(f)
    return (e for e in self.items() if f(e))
def TakeWhile(self: dict, f):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'f': lambda x: x < 4,
            'assert': lambda ret: list(ret) == [1, 2, 3]
        }
    ]
    """
    if is_to_destruct(f):
        f = destruct_func(f)
    for e in self.items():
        if not f(e):
            break
        yield e
def Drop(self: dict, n):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'n': 3,
            'assert': lambda ret: list(ret) == [1, 2]
        }
    ]
    """
    n = len(self) - n
    if n <= 0:
        yield from self.items()
    else:
        for i, e in enumerate(self.items()):
            if i == n:
                break
            yield e
def Skip(self: dict, n):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'n': 3,
            'assert': lambda ret: list(ret) == [4, 5]
        }
    ]
    """
    con = self.items()
    for i, _ in enumerate(con):
        if i == n:
            break
    return con
def print_meter_record(file_path, rows=5):
    """ Output readings for specified number of rows to console """
    m = nr.read_nem_file(file_path)
    print('Header:', m.header)
    print('Transactions:', m.transactions)
    for nmi in m.readings:
        for channel in m.readings[nmi]:
            print(nmi, 'Channel', channel)
            for reading in m.readings[nmi][channel][-rows:]:
                print('', reading)
def users(self):
    """ Returns all users that have permissions for this resource """
    return sa.orm.relationship(
        "User",
        secondary="users_resources_permissions",
        passive_deletes=True,
        passive_updates=True,
    )
def from_resource_deeper(
    cls, resource_id=None, limit_depth=1000000, db_session=None, *args, **kwargs
):
    """
    Returns a subtree of ordered objects relative to the starting resource_id
    (currently only implemented in postgresql)

    :param resource_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    tablename = cls.model.__table__.name
    raw_q = """
        WITH RECURSIVE subtree AS (
                SELECT res.*, 1 AS depth,
                       LPAD(res.ordering::CHARACTER VARYING, 7, '0') AS sorting,
                       res.resource_id::CHARACTER VARYING AS path
                FROM {tablename} AS res WHERE res.resource_id = :resource_id
            UNION ALL
                SELECT res_u.*, depth+1 AS depth,
                       (st.sorting::CHARACTER VARYING || '/' ||
                        LPAD(res_u.ordering::CHARACTER VARYING, 7, '0')) AS sorting,
                       (st.path::CHARACTER VARYING || '/' ||
                        res_u.resource_id::CHARACTER VARYING) AS path
                FROM {tablename} res_u, subtree st
                WHERE res_u.parent_id = st.resource_id
        )
        SELECT * FROM subtree WHERE depth<=:depth ORDER BY sorting;
    """.format(
        tablename=tablename
    )  # noqa
    db_session = get_db_session(db_session)
    text_obj = sa.text(raw_q)
    query = db_session.query(cls.model, "depth", "sorting", "path")
    query = query.from_statement(text_obj)
    query = query.params(resource_id=resource_id, depth=limit_depth)
    return query
def delete_branch(cls, resource_id=None, db_session=None, *args, **kwargs):
    """
    Deletes the whole branch, including children, starting from resource_id

    :param resource_id:
    :param db_session:
    :return:
    """
    tablename = cls.model.__table__.name
    # lets lock rows to prevent bad tree states
    resource = ResourceService.lock_resource_for_update(
        resource_id=resource_id, db_session=db_session
    )
    parent_id = resource.parent_id
    ordering = resource.ordering
    raw_q = """
        WITH RECURSIVE subtree AS (
                SELECT res.resource_id
                FROM {tablename} AS res WHERE res.resource_id = :resource_id
            UNION ALL
                SELECT res_u.resource_id
                FROM {tablename} res_u, subtree st
                WHERE res_u.parent_id = st.resource_id
        )
        DELETE FROM resources where resource_id in (select * from subtree);
    """.format(
        tablename=tablename
    )  # noqa
    db_session = get_db_session(db_session)
    text_obj = sa.text(raw_q)
    db_session.execute(text_obj, params={"resource_id": resource_id})
    cls.shift_ordering_down(parent_id, ordering, db_session=db_session)
    return True
def from_parent_deeper(
    cls, parent_id=None, limit_depth=1000000, db_session=None, *args, **kwargs
):
    """
    Returns a subtree of ordered objects relative to the starting parent_id
    (currently only implemented in postgresql)

    :param parent_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    if parent_id:
        limiting_clause = "res.parent_id = :parent_id"
    else:
        limiting_clause = "res.parent_id is null"
    tablename = cls.model.__table__.name
    raw_q = """
        WITH RECURSIVE subtree AS (
                SELECT res.*, 1 AS depth,
                       LPAD(res.ordering::CHARACTER VARYING, 7, '0') AS sorting,
                       res.resource_id::CHARACTER VARYING AS path
                FROM {tablename} AS res WHERE {limiting_clause}
            UNION ALL
                SELECT res_u.*, depth+1 AS depth,
                       (st.sorting::CHARACTER VARYING || '/' ||
                        LPAD(res_u.ordering::CHARACTER VARYING, 7, '0')) AS sorting,
                       (st.path::CHARACTER VARYING || '/' ||
                        res_u.resource_id::CHARACTER VARYING) AS path
                FROM {tablename} res_u, subtree st
                WHERE res_u.parent_id = st.resource_id
        )
        SELECT * FROM subtree WHERE depth<=:depth ORDER BY sorting;
    """.format(
        tablename=tablename, limiting_clause=limiting_clause
    )  # noqa
    db_session = get_db_session(db_session)
    text_obj = sa.text(raw_q)
    query = db_session.query(cls.model, "depth", "sorting", "path")
    query = query.from_statement(text_obj)
    query = query.params(parent_id=parent_id, depth=limit_depth)
    return query
def build_subtree_strut(self, result, *args, **kwargs):
    """
    Returns a dictionary in form of
    {node: Resource, children: {node_id: Resource}}

    :param result:
    :return:
    """
    items = list(result)
    root_elem = {"node": None, "children": OrderedDict()}
    if len(items) == 0:
        return root_elem
    for _, node in enumerate(items):
        new_elem = {"node": node.Resource, "children": OrderedDict()}
        path = list(map(int, node.path.split("/")))
        parent_node = root_elem
        normalized_path = path[:-1]
        if normalized_path:
            for path_part in normalized_path:
                parent_node = parent_node["children"][path_part]
        parent_node["children"][new_elem["node"].resource_id] = new_elem
    return root_elem
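# Usage sketch (not part of the original source): walking the nested
# {"node": ..., "children": OrderedDict()} structure returned above.
# `tree_service` and `result` are hypothetical names for an instance of this
# service and a row set produced by one of the subtree queries.
def print_subtree(struct, indent=0):
    node = struct["node"]
    if node is not None:
        print(" " * indent + str(node.resource_id))
    for child in struct["children"].values():
        print_subtree(child, indent + 2)

# tree = tree_service.build_subtree_strut(result)
# print_subtree(tree)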
def path_upper(cls, object_id, limit_depth=1000000, db_session=None, *args, **kwargs):
    """
    Returns the path to the root node starting from object_id
    (currently only for postgresql)

    :param object_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    tablename = cls.model.__table__.name
    raw_q = """
        WITH RECURSIVE subtree AS (
                SELECT res.*, 1 as depth FROM {tablename} res
                WHERE res.resource_id = :resource_id
            UNION ALL
                SELECT res_u.*, depth+1 as depth
                FROM {tablename} res_u, subtree st
                WHERE res_u.resource_id = st.parent_id
        )
        SELECT * FROM subtree WHERE depth<=:depth;
    """.format(
        tablename=tablename
    )
    db_session = get_db_session(db_session)
    q = (
        db_session.query(cls.model)
        .from_statement(sa.text(raw_q))
        .params(resource_id=object_id, depth=limit_depth)
    )
    return q
def move_to_position(
    cls, resource_id, to_position, new_parent_id=noop, db_session=None, *args, **kwargs
):
    """
    Moves node to new location in the tree

    :param resource_id: resource to move
    :param to_position: new position
    :param new_parent_id: new parent id
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    # lets lock rows to prevent bad tree states
    resource = ResourceService.lock_resource_for_update(
        resource_id=resource_id, db_session=db_session
    )
    ResourceService.lock_resource_for_update(
        resource_id=resource.parent_id, db_session=db_session
    )
    same_branch = False

    # reset if parent is same as old
    if new_parent_id == resource.parent_id:
        new_parent_id = noop

    if new_parent_id is not noop:
        cls.check_node_parent(resource_id, new_parent_id, db_session=db_session)
    else:
        same_branch = True

    if new_parent_id is noop:
        # it is not guaranteed that parent exists
        parent_id = resource.parent_id if resource else None
    else:
        parent_id = new_parent_id

    cls.check_node_position(
        parent_id, to_position, on_same_branch=same_branch, db_session=db_session
    )
    # move on same branch
    if new_parent_id is noop:
        order_range = list(sorted((resource.ordering, to_position)))
        move_down = resource.ordering > to_position

        query = db_session.query(cls.model)
        query = query.filter(cls.model.parent_id == parent_id)
        query = query.filter(cls.model.ordering.between(*order_range))
        if move_down:
            query.update(
                {cls.model.ordering: cls.model.ordering + 1},
                synchronize_session=False,
            )
        else:
            query.update(
                {cls.model.ordering: cls.model.ordering - 1},
                synchronize_session=False,
            )
        db_session.flush()
        db_session.expire(resource)
        resource.ordering = to_position
    # move between branches
    else:
        cls.shift_ordering_down(
            resource.parent_id, resource.ordering, db_session=db_session
        )
        cls.shift_ordering_up(new_parent_id, to_position, db_session=db_session)
        db_session.expire(resource)
        resource.parent_id = new_parent_id
        resource.ordering = to_position
        db_session.flush()
    return True
def shift_ordering_up(cls, parent_id, position, db_session=None, *args, **kwargs):
    """
    Shifts ordering to "open a gap" for node insertion,
    begins the shift from given position

    :param parent_id:
    :param position:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.model)
    query = query.filter(cls.model.parent_id == parent_id)
    query = query.filter(cls.model.ordering >= position)
    query.update(
        {cls.model.ordering: cls.model.ordering + 1}, synchronize_session=False
    )
    db_session.flush()
def set_position(cls, resource_id, to_position, db_session=None, *args, **kwargs):
    """
    Sets node position for new node in the tree

    :param resource_id: resource to move
    :param to_position: new position
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    # lets lock rows to prevent bad tree states
    resource = ResourceService.lock_resource_for_update(
        resource_id=resource_id, db_session=db_session
    )
    cls.check_node_position(
        resource.parent_id, to_position, on_same_branch=True, db_session=db_session
    )
    cls.shift_ordering_up(resource.parent_id, to_position, db_session=db_session)
    db_session.flush()
    db_session.expire(resource)
    resource.ordering = to_position
    return True
def check_node_parent(
    cls, resource_id, new_parent_id, db_session=None, *args, **kwargs
):
    """
    Checks if parent destination is valid for node

    :param resource_id:
    :param new_parent_id:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    new_parent = ResourceService.lock_resource_for_update(
        resource_id=new_parent_id, db_session=db_session
    )
    # we are not moving to "root" so parent should be found
    if not new_parent and new_parent_id is not None:
        raise ZigguratResourceTreeMissingException("New parent node not found")
    else:
        result = cls.path_upper(new_parent_id, db_session=db_session)
        path_ids = [r.resource_id for r in result]
        if resource_id in path_ids:
            raise ZigguratResourceTreePathException(
                "Trying to insert node into itself"
            )
def count_children(cls, resource_id, db_session=None, *args, **kwargs):
    """
    Counts children of resource node

    :param resource_id:
    :param db_session:
    :return:
    """
    query = db_session.query(cls.model.resource_id)
    query = query.filter(cls.model.parent_id == resource_id)
    return query.count()
def check_node_position(
    cls, parent_id, position, on_same_branch, db_session=None, *args, **kwargs
):
    """
    Checks if node position for given parent is valid,
    raises exception if this is not the case

    :param parent_id:
    :param position:
    :param on_same_branch: indicates that we are checking same branch
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    if not position or position < 1:
        raise ZigguratResourceOutOfBoundaryException(
            "Position is lower than {}", value=1
        )
    item_count = cls.count_children(parent_id, db_session=db_session)
    max_value = item_count if on_same_branch else item_count + 1
    if position > max_value:
        raise ZigguratResourceOutOfBoundaryException(
            "Maximum resource ordering is {}", value=max_value
        )
def egcd(b, n):
    '''
    Given two integers (b, n), returns (gcd(b, n), a, m) such that
    a*b + n*m = gcd(b, n).

    Adapted from several sources:
    https://brilliant.org/wiki/extended-euclidean-algorithm/
    https://rosettacode.org/wiki/Modular_inverse
    https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
    https://en.wikipedia.org/wiki/Euclidean_algorithm

    >>> egcd(1, 1)
    (1, 0, 1)
    >>> egcd(12, 8)
    (4, 1, -1)
    >>> egcd(23894798501898, 23948178468116)
    (2, 2437250447493, -2431817869532)
    >>> egcd(pow(2, 50), pow(3, 50))
    (1, -260414429242905345185687, 408415383037561)
    '''
    (x0, x1, y0, y1) = (1, 0, 0, 1)
    while n != 0:
        (q, b, n) = (b // n, n, b % n)
        (x0, x1) = (x1, x0 - q * x1)
        (y0, y1) = (y1, y0 - q * y1)
    return (b, x0, y0)
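# Usage sketch (not part of the original source): computing a modular inverse
# with egcd, one of the applications cited in the docstring. Assumes the egcd
# function defined above is in scope.
def modinv(a, m):
    # a*x + m*y == g; when g == 1, x is the inverse of a modulo m
    g, x, _ = egcd(a, m)
    if g != 1:
        raise ValueError("modular inverse does not exist")
    return x % m

assert modinv(3, 11) == 4  # 3 * 4 == 12 == 1 (mod 11)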
def register(linter):
    """Add the needed transformations and suppressions."""
    linter.register_checker(MongoEngineChecker(linter))
    add_transform('mongoengine')
    add_transform('mongomotor')
    suppress_qs_decorator_messages(linter)
    suppress_fields_attrs_messages(linter)
def output_as_csv(file_name, nmi=None, output_file=None):
    """
    Transpose all channels and output a csv that is easier
    to read and do charting on

    :param file_name: The NEM file to process
    :param nmi: Which NMI to output if more than one
    :param output_file: Specify different output location
    :returns: The file that was created
    """
    m = read_nem_file(file_name)
    if nmi is None:
        nmi = list(m.readings.keys())[0]  # Use first NMI

    channels = list(m.transactions[nmi].keys())
    num_records = len(m.readings[nmi][channels[0]])
    last_date = m.readings[nmi][channels[0]][-1].t_end
    if output_file is None:
        output_file = '{}_{}_transposed.csv'.format(
            nmi, last_date.strftime('%Y%m%d'))

    with open(output_file, 'w', newline='') as csvfile:
        cwriter = csv.writer(
            csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        heading_list = ['period_start', 'period_end']
        for channel in channels:
            heading_list.append(channel)
        heading_list.append('quality_method')
        cwriter.writerow(heading_list)

        for i in range(0, num_records):
            t_start = m.readings[nmi][channels[0]][i].t_start
            t_end = m.readings[nmi][channels[0]][i].t_end
            quality_method = m.readings[nmi][channels[0]][i].quality_method
            row_list = [t_start, t_end]
            for ch in channels:
                val = m.readings[nmi][ch][i].read_value
                row_list.append(val)
            row_list.append(quality_method)
            cwriter.writerow(row_list)

    return output_file
def get(cls, group_id, db_session=None):
    """
    Fetch row using primary key -
    will use existing object in session if already present

    :param group_id:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    return db_session.query(cls.model).get(group_id)
def by_group_name(cls, group_name, db_session=None):
    """
    Fetch group by name

    :param group_name:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.model).filter(cls.model.group_name == group_name)
    return query.first()
def get_user_paginator(
    cls,
    instance,
    page=1,
    item_count=None,
    items_per_page=50,
    user_ids=None,
    GET_params=None,
):
    """
    Returns paginator over users belonging to the group

    :param instance:
    :param page:
    :param item_count:
    :param items_per_page:
    :param user_ids:
    :param GET_params:
    :return:
    """
    if not GET_params:
        GET_params = {}
    GET_params.pop("page", None)
    query = instance.users_dynamic
    if user_ids:
        query = query.filter(cls.models_proxy.UserGroup.user_id.in_(user_ids))
    return SqlalchemyOrmPage(
        query,
        page=page,
        item_count=item_count,
        items_per_page=items_per_page,
        **GET_params
    )
def resources_with_possible_perms(
    cls,
    instance,
    perm_names=None,
    resource_ids=None,
    resource_types=None,
    db_session=None,
):
    """
    Returns list of permissions and resources for this group;
    resource_ids restricts the search to specific resources

    :param instance:
    :param perm_names:
    :param resource_ids:
    :param resource_types:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    query = db_session.query(
        cls.models_proxy.GroupResourcePermission.perm_name,
        cls.models_proxy.Group,
        cls.models_proxy.Resource,
    )
    query = query.filter(
        cls.models_proxy.Resource.resource_id
        == cls.models_proxy.GroupResourcePermission.resource_id
    )
    query = query.filter(
        cls.models_proxy.Group.id
        == cls.models_proxy.GroupResourcePermission.group_id
    )
    if resource_ids:
        query = query.filter(
            cls.models_proxy.GroupResourcePermission.resource_id.in_(resource_ids)
        )
    if resource_types:
        query = query.filter(
            cls.models_proxy.Resource.resource_type.in_(resource_types)
        )
    if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names:
        query = query.filter(
            cls.models_proxy.GroupResourcePermission.perm_name.in_(perm_names)
        )
    query = query.filter(
        cls.models_proxy.GroupResourcePermission.group_id == instance.id
    )

    perms = [
        PermissionTuple(
            None, row.perm_name, "group", instance, row.Resource, False, True
        )
        for row in query
    ]
    for resource in instance.resources:
        perms.append(
            PermissionTuple(
                None, ALL_PERMISSIONS, "group", instance, resource, True, True
            )
        )
    return perms
def create_pool(database, minsize=1, maxsize=10, echo=False, loop=None, **kwargs):
    """ Create a pool that supports context management. """
    coro = _create_pool(
        database=database,
        minsize=minsize,
        maxsize=maxsize,
        echo=echo,
        loop=loop,
        **kwargs
    )
    return _PoolContextManager(coro)
def wait_closed(self):
    """ Wait for closing all pool's connections. """
    if self._closed:
        return
    if not self._closing:
        raise RuntimeError(
            ".wait_closed() should be called after .close()"
        )

    while self._free:
        conn = self._free.popleft()
        if not conn.closed:
            yield from conn.close()
        else:  # pragma: no cover
            pass

    with (yield from self._cond):
        while self.size > self.freesize:
            yield from self._cond.wait()

    self._used.clear()
    self._closed = True
def sync_close(self):
    """ Close the pool synchronously. """
    if self._closed:
        return

    while self._free:
        conn = self._free.popleft()
        if not conn.closed:  # pragma: no cover
            conn.sync_close()

    for conn in self._used:
        if not conn.closed:  # pragma: no cover
            conn.sync_close()
            self._terminated.add(conn)

    self._used.clear()
    self._closed = True
def _fill_free_pool(self, override_min):
    """ Fill the free pool with new connections, up to minsize
    (and up to maxsize when override_min is set). """
    while self.size < self.minsize:
        self._acquiring += 1
        try:
            conn = yield from connect(
                database=self._database,
                echo=self._echo,
                loop=self._loop,
                **self._conn_kwargs
            )
            self._free.append(conn)
            self._cond.notify()
        finally:
            self._acquiring -= 1
    if self._free:
        return

    if override_min and self.size < self.maxsize:
        self._acquiring += 1
        try:
            conn = yield from connect(
                database=self._database,
                echo=self._echo,
                loop=self._loop,
                **self._conn_kwargs
            )
            self._free.append(conn)
            self._cond.notify()
        finally:
            self._acquiring -= 1
def add_function(self, function):
    """ Adds the function to the list of registered functions. """
    function = self.build_function(function)
    if function.name in self.functions:
        raise FunctionAlreadyRegistered(function.name)
    self.functions[function.name] = function
def get_one(self, context, name):
    """ Returns a function if it is registered, the context is ignored. """
    try:
        return self.functions[name]
    except KeyError:
        raise FunctionNotFound(name)
def subfield_get(self, obj, type=None):
    """
    Verbatim copy from:
    https://github.com/django/django/blob/1.9.13/django/db/models/fields/subclassing.py#L38
    """
    if obj is None:
        return self
    return obj.__dict__[self.field.name]
def _preprocess_kwargs(self, initial_kwargs):
    """ Replace generic key related attribute with filters by object_id and content_type fields """
    kwargs = initial_kwargs.copy()
    generic_key_related_kwargs = self._get_generic_key_related_kwargs(initial_kwargs)
    for key, value in generic_key_related_kwargs.items():
        # delete old kwarg that was related to generic key
        del kwargs[key]
        try:
            suffix = key.split('__')[1]
        except IndexError:
            suffix = None
        # add new kwargs that are related to object_id and content_type fields
        new_kwargs = self._get_filter_object_id_and_content_type_filter_kwargs(value, suffix)
        kwargs.update(new_kwargs)
    return kwargs
def categorize(
    data,
    col_name: str = None,
    new_col_name: str = None,
    categories: dict = None,
    max_categories: float = 0.15
):
    """
    :param data:
    :param col_name:
    :param new_col_name:
    :param categories:
    :param max_categories: max proportion threshold of categories
    :return: new categories
    :rtype dict:
    """
    _categories = {}

    if col_name is None:
        if categories is not None:
            raise Exception(
                'col_name is None when categories was defined.'
            )
        # create a list of all object columns whose share of unique values
        # is within the max_categories threshold
        cols = [
            k for k in data.keys()
            if data[k].dtype == 'object'
            and (len(data[k].unique()) / data[k].count()) <= max_categories
        ]
    else:
        # create a list with col_name
        if new_col_name is not None:
            data[new_col_name] = data[col_name]
            col_name = new_col_name
        cols = [col_name]

    for c in cols:
        if categories is not None:
            # assert all keys are numbers
            assert all(type(k) in (int, float) for k in categories.keys())
            # replace values using given categories dict
            data[c].replace(categories, inplace=True)
            # change column to categorical type
            data[c] = data[c].astype('category')
            # update categories information
            _categories.update({c: categories})
        else:
            # change column to categorical type
            data[c] = data[c].astype('category')
            # store the mapping from category code to label
            _categories.update({
                c: dict(enumerate(data[c].cat.categories))
            })

    return _categories
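# Usage sketch (not part of the original source), assuming pandas as pd and
# the categorize helper above. The exact returned mapping depends on pandas'
# alphabetical ordering of category labels.
# df = pd.DataFrame({'color': ['red', 'blue', 'red', 'blue', 'red', 'blue']})
# cats = categorize(df, col_name='color')
# # 'color' is now a categorical column; `cats` maps codes to labels, e.g.
# # {'color': {0: 'blue', 1: 'red'}}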
def dropna(data: pd.DataFrame, axis: int, **params):
    """
    Remove rows or columns with more NA values than the threshold level

    :param data:
    :param axis: Axes are defined for arrays with more than one dimension.
        A 2-dimensional array has two corresponding axes: the first running
        vertically downwards across rows (axis 0), and the second running
        horizontally across columns (axis 1).
        (https://docs.scipy.org/doc/numpy-1.10.0/glossary.html)
    :param params:
    :return:
    """
    if axis == 0:
        dropna_rows(data=data, **params)
    else:
        dropna_columns(data=data, **params)
def dropna_columns(data: pd.DataFrame, max_na_values: float = 0.15):
    """
    Remove columns with more NA values than the threshold level

    :param data:
    :param max_na_values: proportion threshold of max na values
    :return:
    """
    size = data.shape[0]
    df_na = (data.isnull().sum() / size) >= max_na_values
    data.drop(df_na[df_na].index, axis=1, inplace=True)
def dropna_rows(data: pd.DataFrame, columns_name: str = None):
    """
    Remove rows with NA values, optionally restricted to the given columns

    :param data:
    :param columns_name: comma-separated column names used as the dropna subset
    :return:
    """
    params = {}

    if columns_name is not None:
        params.update({'subset': columns_name.split(',')})

    data.dropna(inplace=True, **params)
def drop_columns_with_unique_values(
    data: pd.DataFrame, max_unique_values: float = 0.25
):
    """
    Remove columns when the proportion of the total of unique values is more
    than the max_unique_values threshold, just for columns with type as
    object or category

    :param data:
    :param max_unique_values:
    :return:
    """
    size = data.shape[0]
    df_uv = data.apply(
        lambda se: (
            (se.dropna().unique().shape[0] / size) > max_unique_values
            and se.dtype in ['object', 'category']
        )
    )
    data.drop(df_uv[df_uv].index, axis=1, inplace=True)
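# Usage sketch (not part of the original source): an id-like text column whose
# unique-value ratio exceeds the threshold is dropped in place, while a
# low-cardinality column is kept. Assumes pandas as pd.
# df = pd.DataFrame({'uid': ['a1', 'b2', 'c3', 'd4'],    # 4/4 unique -> dropped
#                    'group': ['x', 'x', 'y', 'y']})     # 2/4 unique -> kept
# drop_columns_with_unique_values(df, max_unique_values=0.5)
# assert list(df.columns) == ['group']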
def list(self, request, *args, **kwargs):
    """
    To get an actual value for object quotas limit and usage issue a **GET**
    request against */api/<objects>/*.

    To get all quotas visible to the user issue a **GET** request against */api/quotas/*
    """
    return super(QuotaViewSet, self).list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
    """
    To set quota limit issue a **PUT** request against */api/quotas/<quota uuid>/* with limit values.
    Please note that if a quota is a cache of a backend quota (e.g. 'storage' size of an OpenStack tenant),
    it will be impossible to modify it through the */api/quotas/<quota uuid>/* endpoint.

    Example of changing quota limit:

    .. code-block:: http

        PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1
        Content-Type: application/json
        Accept: application/json
        Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
        Host: example.com

        {
            "limit": 2000.0
        }

    Example of changing quota threshold:

    .. code-block:: http

        PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1
        Content-Type: application/json
        Accept: application/json
        Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
        Host: example.com

        {
            "threshold": 100.0
        }
    """
    return super(QuotaViewSet, self).retrieve(request, *args, **kwargs)
def history(self, request, uuid=None):
    """
    Historical data endpoints could be available for any objects (currently
    implemented for quotas and events count). The data is available at
    *<object_endpoint>/history/*, for example: */api/quotas/<uuid>/history/*.

    There are two ways to define datetime points for historical data.

    1. Send a *?point=<timestamp>* parameter, which can be repeated to form a list.
       Response will contain historical data for each given point in the same order.
    2. Send *?start=<timestamp>*, *?end=<timestamp>*, *?points_count=<integer>* parameters.
       Result will contain <points_count> points from <start> to <end>.

    Response format:

    .. code-block:: javascript

        [
            {
                "point": <timestamp>,
                "object": {<object_representation>}
            },
            {
                "point": <timestamp>,
                "object": {<object_representation>}
            },
            ...
        ]

    NB! There will not be any "object" for a corresponding point in the response
    if there is no data about the object for a given timestamp.
    """
    mapped = {
        'start': request.query_params.get('start'),
        'end': request.query_params.get('end'),
        'points_count': request.query_params.get('points_count'),
        'point_list': request.query_params.getlist('point'),
    }
    history_serializer = HistorySerializer(data={k: v for k, v in mapped.items() if v})
    history_serializer.is_valid(raise_exception=True)

    quota = self.get_object()
    serializer = self.get_serializer(quota)
    serialized_versions = []
    for point_date in history_serializer.get_filter_data():
        serialized = {'point': datetime_to_timestamp(point_date)}
        version = Version.objects.get_for_object(quota).filter(revision__date_created__lte=point_date)
        if version.exists():
            # make a copy of serialized data and update fields that are stored in version
            version_object = version.first()._object_version.object
            serialized['object'] = serializer.data.copy()
            serialized['object'].update({
                f: getattr(version_object, f) for f in quota.get_version_fields()
            })
        serialized_versions.append(serialized)

    return response.Response(serialized_versions, status=status.HTTP_200_OK)
def _get_url(self, obj):
    """ Gets object url """
    format_kwargs = {
        'app_label': obj._meta.app_label,
    }
    try:
        format_kwargs['model_name'] = getattr(obj.__class__, 'get_url_name')()
    except AttributeError:
        format_kwargs['model_name'] = obj._meta.object_name.lower()
    return self._default_view_name % format_kwargs
def to_representation(self, obj):
    """ Serializes any object to its URL representation """
    kwargs = None
    for field in self.lookup_fields:
        if hasattr(obj, field):
            kwargs = {field: getattr(obj, field)}
            break
    if kwargs is None:
        raise AttributeError('Related object does not have any of lookup_fields')
    request = self._get_request()
    return request.build_absolute_uri(reverse(self._get_url(obj), kwargs=kwargs))
def to_internal_value(self, data):
    """ Restores model instance from its url """
    if not data:
        return None
    request = self._get_request()
    user = request.user
    try:
        obj = core_utils.instance_from_url(data, user=user)
        model = obj.__class__
    except ValueError:
        raise serializers.ValidationError(_('URL is invalid: %s.') % data)
    except (Resolver404, AttributeError, MultipleObjectsReturned, ObjectDoesNotExist):
        raise serializers.ValidationError(_("Can't restore object from url: %s") % data)

    if model not in self.related_models:
        raise serializers.ValidationError(_('%s object does not support such relationship.') % six.text_type(obj))

    return obj
def validate(self, data):
    """ Check that the start is before the end. """
    if 'start' in data and 'end' in data and data['start'] >= data['end']:
        raise serializers.ValidationError(_('End must occur after start.'))
    return data
def send_task(app_label, task_name):
    """ A helper function to deal with waldur_core "high-level" tasks.

        Define a high-level task with an explicit name using the pattern:
        waldur_core.<app_label>.<task_name>

        .. code-block:: python

            @shared_task(name='waldur_core.openstack.provision_instance')
            def provision_instance_fn(instance_uuid, backend_flavor_id):
                pass

        Call it by name:

        .. code-block:: python

            send_task('openstack', 'provision_instance')(instance_uuid, backend_flavor_id)

        Which is identical to:

        .. code-block:: python

            provision_instance_fn.delay(instance_uuid, backend_flavor_id)
    """

    def delay(*args, **kwargs):
        full_task_name = 'waldur_core.%s.%s' % (app_label, task_name)
        send_celery_task(full_task_name, args, kwargs, countdown=2)

    return delay
def log_celery_task(request):
    """ Add description to celery log output """
    task = request.task
    description = None
    if isinstance(task, Task):
        try:
            description = task.get_description(*request.args, **request.kwargs)
        except NotImplementedError:
            pass
        except Exception as e:
            # Logging should never break workflow.
            logger.exception('Cannot get description for task %s. Error: %s' % (task.__class__.__name__, e))

    return '{0.name}[{0.id}]{1}{2}{3}'.format(
        request,
        ' {0}'.format(description) if description else '',
        ' eta:[{0}]'.format(request.eta) if request.eta else '',
        ' expires:[{0}]'.format(request.expires) if request.expires else '',
    )
def run(self, serialized_instance, *args, **kwargs):
    """ Deserialize input data and start backend operation execution """
    try:
        instance = utils.deserialize_instance(serialized_instance)
    except ObjectDoesNotExist:
        message = ('Cannot restore instance from serialized object %s. Probably it was deleted.' %
                   serialized_instance)
        six.reraise(ObjectDoesNotExist, message)

    self.args = args
    self.kwargs = kwargs

    self.pre_execute(instance)
    result = self.execute(instance, *self.args, **self.kwargs)
    self.post_execute(instance)
    if result and isinstance(result, django_models.Model):
        result = utils.serialize_instance(result)
    return result
def is_previous_task_processing(self, *args, **kwargs):
    """ Return True if there exists an uncompleted task that is equal to the current one """
    app = self._get_app()
    inspect = app.control.inspect()
    active = inspect.active() or {}
    scheduled = inspect.scheduled() or {}
    reserved = inspect.reserved() or {}
    uncompleted = sum(list(active.values()) + list(scheduled.values()) + list(reserved.values()), [])
    return any(self.is_equal(task, *args, **kwargs) for task in uncompleted)
def apply_async(self, args=None, kwargs=None, **options):
    """ Do not run background task if previous task is uncompleted """
    if self.is_previous_task_processing(*args, **kwargs):
        message = 'Background task %s was not scheduled, because its predecessor is not completed yet.' % self.name
        logger.info(message)
        # It is expected by Celery that apply_async return AsyncResult, otherwise celerybeat dies
        return self.AsyncResult(options.get('task_id') or str(uuid4()))
    return super(BackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options)
def _get_cache_key(self, args, kwargs):
    """ Returns key to be used in cache """
    hash_input = json.dumps({'name': self.name, 'args': args, 'kwargs': kwargs}, sort_keys=True)
    # md5 is used for internal caching, no need to care about security
    return hashlib.md5(hash_input).hexdigest()
def apply_async(self, args=None, kwargs=None, **options):
    """ Checks whether task must be skipped and decreases the counter in that case. """
    key = self._get_cache_key(args, kwargs)
    counter, penalty = cache.get(key, (0, 0))
    if not counter:
        return super(PenalizedBackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options)

    cache.set(key, (counter - 1, penalty), self.CACHE_LIFETIME)
    logger.info('The task %s will not be executed due to the penalty.' % self.name)
    return self.AsyncResult(options.get('task_id') or str(uuid4()))
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """ Increases penalty for the task and resets the counter. """
    key = self._get_cache_key(args, kwargs)
    _, penalty = cache.get(key, (0, 0))
    if penalty < self.MAX_PENALTY:
        penalty += 1
    logger.debug('The task %s is penalized and will be executed on %d run.' % (self.name, penalty))
    cache.set(key, (penalty, penalty), self.CACHE_LIFETIME)
    return super(PenalizedBackgroundTask, self).on_failure(exc, task_id, args, kwargs, einfo)
def on_success(self, retval, task_id, args, kwargs):
    """ Clears cache for the task. """
    key = self._get_cache_key(args, kwargs)
    if cache.get(key) is not None:
        cache.delete(key)
        logger.debug('Penalty for the task %s has been removed.' % self.name)
    return super(PenalizedBackgroundTask, self).on_success(retval, task_id, args, kwargs)
def log_backend_action(action=None):
    """ Logging for backend method. Expects django model instance as first argument. """

    def decorator(func):
        @functools.wraps(func)
        def wrapped(self, instance, *args, **kwargs):
            action_name = func.__name__.replace('_', ' ') if action is None else action

            logger.debug('About to %s `%s` (PK: %s).', action_name, instance, instance.pk)
            result = func(self, instance, *args, **kwargs)
            logger.debug('Action `%s` was executed successfully for `%s` (PK: %s).',
                         action_name, instance, instance.pk)
            return result
        return wrapped
    return decorator
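# Usage sketch (not part of the original source): the decorator logs before
# and after the wrapped call, using the model instance passed as the first
# argument. The backend class and method below are hypothetical.
class ExampleBackend(object):
    @log_backend_action('provision volume')
    def provision_volume(self, volume, size):
        # ... talk to the backend API here ...
        return size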
def get_services(cls, request=None):
    """ Get a list of services endpoints.

        {
            "Oracle": "/api/oracle/",
            "OpenStack": "/api/openstack/",
            "GitLab": "/api/gitlab/",
            "DigitalOcean": "/api/digitalocean/"
        }
    """
    return {service['name']: reverse(service['list_view'], request=request)
            for service in cls._registry.values()}
def get_resources(cls, request=None):
    """ Get a list of resources endpoints.

        {
            "DigitalOcean.Droplet": "/api/digitalocean-droplets/",
            "Oracle.Database": "/api/oracle-databases/",
            "GitLab.Group": "/api/gitlab-groups/",
            "GitLab.Project": "/api/gitlab-projects/"
        }
    """
    return {'.'.join([service['name'], resource['name']]): reverse(resource['list_view'], request=request)
            for service in cls._registry.values()
            for resource in service['resources'].values()}
def get_services_with_resources(cls, request=None):
    """ Get a list of services and resources endpoints.

        {
            ...
            "GitLab": {
                "url": "/api/gitlab/",
                "service_project_link_url": "/api/gitlab-service-project-link/",
                "resources": {
                    "Project": "/api/gitlab-projects/",
                    "Group": "/api/gitlab-groups/"
                }
            },
            ...
        }
    """
    from django.apps import apps

    data = {}
    for service in cls._registry.values():
        service_model = apps.get_model(service['model_name'])
        service_project_link = service_model.projects.through
        service_project_link_url = reverse(cls.get_list_view_for_model(service_project_link), request=request)

        data[service['name']] = {
            'url': reverse(service['list_view'], request=request),
            'service_project_link_url': service_project_link_url,
            'resources': {resource['name']: reverse(resource['list_view'], request=request)
                          for resource in service['resources'].values()},
            'properties': {resource['name']: reverse(resource['list_view'], request=request)
                           for resource in service.get('properties', {}).values()},
            'is_public_service': cls.is_public_service(service_model)
        }
    return data
def get_service_models(cls):
    """ Get a list of service models.

        {
            ...
            'gitlab': {
                "service": nodeconductor_gitlab.models.GitLabService,
                "service_project_link": nodeconductor_gitlab.models.GitLabServiceProjectLink,
                "resources": [
                    nodeconductor_gitlab.models.Group,
                    nodeconductor_gitlab.models.Project
                ],
            },
            ...
        }
    """
    from django.apps import apps

    data = {}
    for key, service in cls._registry.items():
        service_model = apps.get_model(service['model_name'])
        service_project_link = service_model.projects.through
        data[key] = {
            'service': service_model,
            'service_project_link': service_project_link,
            'resources': [apps.get_model(r) for r in service['resources'].keys()],
            'properties': [apps.get_model(r) for r in service['properties'].keys() if '.' in r],
        }

    return data
def get_resource_models(cls):
    """ Get a list of resource models.

        {
            'DigitalOcean.Droplet': waldur_digitalocean.models.Droplet,
            'JIRA.Project': waldur_jira.models.Project,
            'OpenStack.Tenant': waldur_openstack.models.Tenant
        }
    """
    from django.apps import apps

    return {'.'.join([service['name'], attrs['name']]): apps.get_model(resource)
            for service in cls._registry.values()
            for resource, attrs in service['resources'].items()}
def get_service_resources(cls, model):
    """ Get resource models by service model """
    key = cls.get_model_key(model)
    return cls.get_service_name_resources(key)
def get_service_name_resources(cls, service_name):
    """ Get resource models by service name """
    from django.apps import apps

    resources = cls._registry[service_name]['resources'].keys()
    return [apps.get_model(resource) for resource in resources]
def get_name_for_model(cls, model):
    """ Get a name for given class or model:
        -- it's a service type for a service
        -- it's a <service_type>.<resource_model_name> for a resource
    """
    key = cls.get_model_key(model)
    model_str = cls._get_model_str(model)
    service = cls._registry[key]
    if model_str in service['resources']:
        return '{}.{}'.format(service['name'], service['resources'][model_str]['name'])
    else:
        return service['name']
def get_related_models(cls, model):
    """ Get a dictionary with related structure models for given class or model:

        >> SupportedServices.get_related_models(gitlab_models.Project)
        {
            'service': nodeconductor_gitlab.models.GitLabService,
            'service_project_link': nodeconductor_gitlab.models.GitLabServiceProjectLink,
            'resources': [
                nodeconductor_gitlab.models.Group,
                nodeconductor_gitlab.models.Project,
            ]
        }
    """
    from waldur_core.structure.models import ServiceSettings

    if isinstance(model, ServiceSettings):
        model_str = cls._registry.get(model.type, {}).get('model_name', '')
    else:
        model_str = cls._get_model_str(model)

    for models in cls.get_service_models().values():
        if model_str == cls._get_model_str(models['service']) or \
           model_str == cls._get_model_str(models['service_project_link']):
            return models

        for resource_model in models['resources']:
            if model_str == cls._get_model_str(resource_model):
                return models
def _is_active_model(cls, model):
    """ Check whether the model's app name is in the list of INSTALLED_APPS """
    # We need to use such a tricky way to check because of inconsistent app names:
    # some apps are included in format "<module_name>.<app_name>" like "waldur_core.openstack"
    # other apps are included in format "<app_name>" like "nodeconductor_sugarcrm"
    return ('.'.join(model.__module__.split('.')[:2]) in settings.INSTALLED_APPS or
            '.'.join(model.__module__.split('.')[:1]) in settings.INSTALLED_APPS)
def process(self, event):
    """
    Send events as push notification via Google Cloud Messaging.
    Expected settings as follows:

        # https://developers.google.com/mobile/add
        WALDUR_CORE['GOOGLE_API'] = {
            'NOTIFICATION_TITLE': "Waldur notification",
            'Android': {
                'server_key': 'AIzaSyA2_7UaVIxXfKeFvxTjQNZbrzkXG9OTCkg',
            },
            'iOS': {
                'server_key': 'AIzaSyA34zlG_y5uHOe2FmcJKwfk2vG-3RW05vk',
            }
        }
    """
    conf = settings.WALDUR_CORE.get('GOOGLE_API') or {}
    keys = conf.get(dict(self.Type.CHOICES)[self.type])

    if not keys or not self.token:
        return

    endpoint = 'https://gcm-http.googleapis.com/gcm/send'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'key=%s' % keys['server_key'],
    }
    payload = {
        'to': self.token,
        'notification': {
            'body': event.get('message', 'New event'),
            'title': conf.get('NOTIFICATION_TITLE', 'Waldur notification'),
            'image': 'icon',
        },
        'data': {
            'event': event
        },
    }
    if self.type == self.Type.IOS:
        payload['content-available'] = '1'

    logger.debug('Submitting GCM push notification with headers %s, payload: %s' % (headers, payload))
    requests.post(endpoint, json=payload, headers=headers)
def get_context_data_from_headers(request, headers_schema):
    """
    Extracts context data from request headers according to specified schema.

    >>> from lxml import etree as et
    >>> from datetime import date
    >>> from pyws.functions.args import TypeFactory

    >>> Fake = type('Fake', (object, ), {})
    >>> request = Fake()
    >>> request.parsed_data = Fake()
    >>> request.parsed_data.xml = et.fromstring(
    ...     '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">'
    ...         '<s:Header>'
    ...             '<headers>'
    ...                 '<string>hello</string>'
    ...                 '<number>100</number>'
    ...                 '<date>2011-08-12</date>'
    ...             '</headers>'
    ...         '</s:Header>'
    ...     '</s:Envelope>')

    >>> data = get_context_data_from_headers(request, TypeFactory(
    ...     {0: 'Headers', 'string': str, 'number': int, 'date': date}))
    >>> data == {'string': 'hello', 'number': 100, 'date': date(2011, 8, 12)}
    True
    """
    if not headers_schema:
        return None
    env = request.parsed_data.xml.xpath(
        '/soap:Envelope', namespaces=SoapProtocol.namespaces)[0]
    header = env.xpath(
        './soap:Header/*', namespaces=SoapProtocol.namespaces)
    if len(header) < 1:
        return None
    return headers_schema.validate(xml2obj(header[0], headers_schema))
def lazy_constant(fn):
    """Decorator to make a function that takes no arguments use the LazyConstant class."""

    class NewLazyConstant(LazyConstant):
        @functools.wraps(fn)
        def __call__(self):
            return self.get_value()

    return NewLazyConstant(fn)
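# Usage sketch (not part of the original source), assuming the LazyConstant
# class referenced above: the wrapped function runs once, on first call, and
# the computed value is reused afterwards. compute_expensive_default is a
# hypothetical helper.
@lazy_constant
def api_base_url():
    return compute_expensive_default()

# first call computes and caches; later calls return the cached value
# url = api_base_url()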
def lru_cache(maxsize=128, key_fn=None):
    """Decorator that adds an LRU cache of size maxsize to the decorated function.

    maxsize is the number of different keys cache can accommodate.
    key_fn is the function that builds key from args. The default key function
    creates a tuple out of args and kwargs. If you use the default, there is no
    reason not to use functools.lru_cache directly.

    Possible use cases:
    - Your cache key is very large, so you don't want to keep the whole key in memory.
    - The function takes some arguments that don't affect the result.

    """

    def decorator(fn):
        cache = LRUCache(maxsize)
        argspec = inspect2.getfullargspec(fn)
        arg_names = argspec.args[1:] + argspec.kwonlyargs  # remove self
        kwargs_defaults = get_kwargs_defaults(argspec)

        cache_key = key_fn
        if cache_key is None:

            def cache_key(args, kwargs):
                return get_args_tuple(args, kwargs, arg_names, kwargs_defaults)

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key = cache_key(args, kwargs)
            try:
                return cache[key]
            except KeyError:
                value = fn(*args, **kwargs)
                cache[key] = value
                return value

        wrapper.clear = cache.clear
        return wrapper

    return decorator
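# Usage sketch (not part of the original source): a custom key_fn keeps only
# the fields that affect the result, so large or irrelevant arguments are not
# held in the cache key. fetch_document and load_from_store are hypothetical.
@lru_cache(maxsize=64, key_fn=lambda args, kwargs: args[0])
def fetch_document(doc_id, verbose=False):
    # `verbose` does not change the result, so it is excluded from the key
    return load_from_store(doc_id)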
def cached_per_instance():
    """Decorator that adds caching to an instance method.

    The cached value is stored so that it gets garbage collected together with the instance.
    The cached values are not stored when the object is pickled.

    """

    def cache_fun(fun):
        argspec = inspect2.getfullargspec(fun)
        arg_names = argspec.args[1:] + argspec.kwonlyargs  # remove self
        kwargs_defaults = get_kwargs_defaults(argspec)
        cache = {}

        def cache_key(args, kwargs):
            return get_args_tuple(args, kwargs, arg_names, kwargs_defaults)

        def clear_cache(instance_key, ref):
            del cache[instance_key]

        @functools.wraps(fun)
        def new_fun(self, *args, **kwargs):
            instance_key = id(self)
            if instance_key not in cache:
                ref = weakref.ref(self, functools.partial(clear_cache, instance_key))
                cache[instance_key] = (ref, {})
            instance_cache = cache[instance_key][1]

            k = cache_key(args, kwargs)
            if k not in instance_cache:
                instance_cache[k] = fun(self, *args, **kwargs)
            return instance_cache[k]

        # just so unit tests can check that this is cleaned up correctly
        new_fun.__cached_per_instance_cache__ = cache
        return new_fun

    return cache_fun
Generates a cache key from the passed in arguments. def get_args_tuple(args, kwargs, arg_names, kwargs_defaults): """Generates a cache key from the passed in arguments.""" args_list = list(args) args_len = len(args) all_args_len = len(arg_names) try: while args_len < all_args_len: arg_name = arg_names[args_len] if arg_name in kwargs_defaults: args_list.append(kwargs.get(arg_name, kwargs_defaults[arg_name])) else: args_list.append(kwargs[arg_name]) args_len += 1 except KeyError as e: raise TypeError("Missing argument %r" % (e.args[0],)) return tuple(args_list)
Computes a kwargs_defaults dictionary for use by get_args_tuple given an argspec. def get_kwargs_defaults(argspec): """Computes a kwargs_defaults dictionary for use by get_args_tuple given an argspec.""" arg_names = tuple(argspec.args) defaults = argspec.defaults or () num_args = len(argspec.args) - len(defaults) kwargs_defaults = {} for i, default_value in enumerate(defaults): kwargs_defaults[arg_names[num_args + i]] = default_value if getattr(argspec, "kwonlydefaults", None): kwargs_defaults.update(argspec.kwonlydefaults) return kwargs_defaults
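To make the interplay of get_kwargs_defaults and get_args_tuple concrete, a small sketch; the function f is hypothetical and the two helpers are assumed importable from the same module as above:

import inspect2
from qcore.caching import get_args_tuple, get_kwargs_defaults  # assumed import path

def f(a, b=2, *, c=3):
    return a + b + c

argspec = inspect2.getfullargspec(f)
arg_names = argspec.args + argspec.kwonlyargs   # ['a', 'b', 'c']
kwargs_defaults = get_kwargs_defaults(argspec)  # {'b': 2, 'c': 3}

# Equivalent positional and keyword calls normalize to the same key:
get_args_tuple((1,), {}, arg_names, kwargs_defaults)        # (1, 2, 3)
get_args_tuple((1,), {"c": 5}, arg_names, kwargs_defaults)  # (1, 2, 5)
# get_args_tuple((), {}, arg_names, kwargs_defaults)        # would raise TypeError: 'a' is missing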
Memoizes return values of the decorated function. Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache() is called on the function. def memoize(fun): """Memoizes return values of the decorated function. Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache() is called on the function. """ argspec = inspect2.getfullargspec(fun) arg_names = argspec.args + argspec.kwonlyargs kwargs_defaults = get_kwargs_defaults(argspec) def cache_key(args, kwargs): return get_args_tuple(args, kwargs, arg_names, kwargs_defaults) @functools.wraps(fun) def new_fun(*args, **kwargs): k = cache_key(args, kwargs) if k not in new_fun.__cache: new_fun.__cache[k] = fun(*args, **kwargs) return new_fun.__cache[k] def clear_cache(): """Removes all cached values for this function.""" new_fun.__cache.clear() new_fun.__cache = {} new_fun.clear_cache = clear_cache return new_fun
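A brief usage sketch; the qcore.caching import path is an assumption:

from qcore.caching import memoize  # assumed import path

@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)            # fast: each distinct n is computed exactly once
fib.clear_cache()  # drops every memoized value for this function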
Memoizes return values of the decorated function for a given time-to-live. Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache() is called on the function or the time-to-live expires. By default, the time-to-live is set to 24 hours. def memoize_with_ttl(ttl_secs=60 * 60 * 24): """Memoizes return values of the decorated function for a given time-to-live. Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache() is called on the function or the time-to-live expires. By default, the time-to-live is set to 24 hours. """ error_msg = ( "Incorrect usage of qcore.caching.memoize_with_ttl: " "ttl_secs must be a positive integer." ) assert_is_instance(ttl_secs, six.integer_types, error_msg) assert_gt(ttl_secs, 0, error_msg) def cache_fun(fun): argspec = inspect2.getfullargspec(fun) arg_names = argspec.args + argspec.kwonlyargs kwargs_defaults = get_kwargs_defaults(argspec) def cache_key(args, kwargs): return repr(get_args_tuple(args, kwargs, arg_names, kwargs_defaults)) @functools.wraps(fun) def new_fun(*args, **kwargs): k = cache_key(args, kwargs) current_time = int(time.time()) # k is not in the cache; perform the function and cache the result. if k not in new_fun.__cache or k not in new_fun.__cache_times: new_fun.__cache[k] = fun(*args, **kwargs) new_fun.__cache_times[k] = current_time return new_fun.__cache[k] # k is in the cache at this point. Check if the ttl has expired; # if so, recompute the value and cache it. cache_time = new_fun.__cache_times[k] if current_time - cache_time > ttl_secs: new_fun.__cache[k] = fun(*args, **kwargs) new_fun.__cache_times[k] = current_time # finally, return the cached result. return new_fun.__cache[k] def clear_cache(): """Removes all cached values for this function.""" new_fun.__cache.clear() new_fun.__cache_times.clear() def dirty(*args, **kwargs): """Dirties the function for a given set of arguments.""" k = cache_key(args, kwargs) new_fun.__cache.pop(k, None) new_fun.__cache_times.pop(k, None) new_fun.__cache = {} new_fun.__cache_times = {} new_fun.clear_cache = clear_cache new_fun.dirty = dirty return new_fun return cache_fun
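A usage sketch under the same import-path assumption; the rate lookup is a stand-in:

import random
from qcore.caching import memoize_with_ttl  # assumed import path

@memoize_with_ttl(ttl_secs=300)  # results are reused for five minutes
def exchange_rate(base, quote):
    # Stand-in for a slow network call.
    return round(random.uniform(0.8, 1.2), 4)

exchange_rate("USD", "EUR")        # computed and cached
exchange_rate("USD", "EUR")        # cache hit until the ttl expires
exchange_rate.dirty("USD", "EUR")  # invalidates just this argument combination
exchange_rate.clear_cache()        # drops everything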
Returns the value of the constant. def get_value(self): """Returns the value of the constant.""" if self.value is not_computed: self.value = self.value_provider() if self.value is not_computed: return None return self.value
Computes the value. Does not look at the cache. def compute(self): """Computes the value. Does not look at the cache.""" self.value = self.value_provider() if self.value is not_computed: return None else: return self.value
Return the value for given key if it exists. def get(self, key, default=miss): """Return the value for given key if it exists.""" if key not in self._dict: return default # invokes __getitem__, which updates the item return self[key]
Empty the cache and optionally invoke item_evicted callback. def clear(self, omit_item_evicted=False): """Empty the cache and optionally invoke item_evicted callback.""" if not omit_item_evicted: items = self._dict.items() for key, value in items: self._evict_item(key, value) self._dict.clear()
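For context, a sketch of how these two methods are typically exercised; the LRUCache(maxsize) constructor, dict-style assignment and the import path are assumptions about the surrounding class:

from qcore.caching import LRUCache  # assumed import path

cache = LRUCache(2)          # assumed constructor: room for two entries
cache["a"] = 1
cache["b"] = 2
cache.get("a")               # 1, and "a" is refreshed as most recently used
cache.get("missing", None)   # the default is returned for absent keys
cache.clear()                # evicts everything; pass omit_item_evicted=True to skip the callback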
Compatibility wrapper for the loop.create_future() call introduced in 3.5.2. def create_future(loop): # pragma: no cover """Compatibility wrapper for the loop.create_future() call introduced in 3.5.2.""" if hasattr(loop, 'create_future'): return loop.create_future() return asyncio.Future(loop=loop)
Compatibility wrapper for the loop.create_task() call introduced in 3.4.2. def create_task(coro, loop): # pragma: no cover """Compatibility wrapper for the loop.create_task() call introduced in 3.4.2.""" if hasattr(loop, 'create_task'): return loop.create_task(coro) return asyncio.Task(coro, loop=loop)
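A small compatibility sketch exercising both wrappers above against a freshly created event loop; the wrappers themselves are assumed to be in scope:

import asyncio

async def answer():
    return 42

loop = asyncio.new_event_loop()
future = create_future(loop)          # uses loop.create_future() when available
future.set_result("ready")
task = create_task(answer(), loop)    # falls back to asyncio.Task on older loops
print(future.result())                # "ready"
print(loop.run_until_complete(task))  # 42
loop.close()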
Adds proxy properties to a class. def proxy_property_directly(bind_attr, attrs):
    """ Adds proxy properties to a class. """
    def cls_builder(cls):
        """ Attaches the proxy properties to the class. """
        for attr_name in attrs:
            setattr(cls, attr_name, _make_proxy_property(bind_attr, attr_name))
        return cls

    return cls_builder
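An illustrative sketch, assuming the decorator above is in scope and that _make_proxy_property(bind_attr, attr_name) (not shown here) builds a read-only property forwarding to getattr(self.<bind_attr>, attr_name):

class Engine:
    url = "sqlite://"
    dialect = "sqlite"

@proxy_property_directly("engine", ["url", "dialect"])
class Database:
    def __init__(self, engine):
        self.engine = engine

db = Database(Engine())
print(db.url, db.dialect)  # both reads are forwarded through db.engine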
Return query dictionary to search objects available to user. def get_permitted_objects_uuids(cls, user): """ Return query dictionary to search objects available to user. """ uuids = filter_queryset_for_user(cls.objects.all(), user).values_list('uuid', flat=True) key = core_utils.camel_case_to_underscore(cls.__name__) + '_uuid' return {key: uuids}
Checks whether the user has a role in the entity. `timestamp` can have the following values: - False - check whether the user has a role in the entity at the moment. - None - check whether the user has a permanent role in the entity. - Datetime object - check whether the user will have a role in the entity at the given timestamp. def has_user(self, user, role=None, timestamp=False):
    """
    Checks whether the user has a role in the entity.

    `timestamp` can have the following values:
        - False - check whether the user has a role in the entity at the moment.
        - None - check whether the user has a permanent role in the entity.
        - Datetime object - check whether the user will have a role in the entity at the given timestamp.
    """
    permissions = self.permissions.filter(user=user, is_active=True)

    if role is not None:
        permissions = permissions.filter(role=role)

    if timestamp is None:
        permissions = permissions.filter(expiration_time=None)
    elif timestamp:
        permissions = permissions.filter(Q(expiration_time=None) | Q(expiration_time__gte=timestamp))

    return permissions.exists()
Identify the contents of `buf` def from_buffer(self, buf): """ Identify the contents of `buf` """ with self.lock: try: # if we're on python3, convert buf to bytes # otherwise this string is passed as wchar* # which is not what libmagic expects if isinstance(buf, str) and str != bytes: buf = buf.encode('utf-8', errors='replace') return maybe_decode(magic_buffer(self.cookie, buf)) except MagicException as e: return self._handle509Bug(e)
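A usage sketch via the standard python-magic entry point; the exact strings returned depend on the local libmagic database:

import magic  # python-magic wraps libmagic

m = magic.Magic(mime=True)
print(m.from_buffer(b"\x89PNG\r\n\x1a\n"))     # e.g. "image/png"
print(m.from_buffer("plain text, not bytes"))  # str input is encoded to UTF-8 internally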
Starts listening to events. Args: timeout (int): number of seconds before timeout. Used for testing purpose only. root_object (bambou.NURESTRootObject): NURESTRootObject object that is listening. Used for testing purpose only. def start(self, timeout=None, root_object=None): """ Starts listening to events. Args: timeout (int): number of seconds before timeout. Used for testing purpose only. root_object (bambou.NURESTRootObject): NURESTRootObject object that is listening. Used for testing purpose only. """ if self._is_running: return if timeout: self._timeout = timeout self._start_time = int(time()) pushcenter_logger.debug("[NURESTPushCenter] Starting push center on url %s ..." % self.url) self._is_running = True self.__root_object = root_object from .nurest_session import NURESTSession current_session = NURESTSession.get_current_session() args_session = {'session': current_session} self._thread = StoppableThread(target=self._listen, name='push-center', kwargs=args_session) self._thread.daemon = True self._thread.start()
Stops listening for events. def stop(self): """ Stops listening for events. """ if not self._is_running: return pushcenter_logger.debug("[NURESTPushCenter] Stopping...") self._thread.stop() self._thread.join() self._is_running = False self._current_connection = None self._start_time = None self._timeout = None
Wait until the thread exits. Used for testing purposes only. def wait_until_exit(self):
    """ Wait until the thread exits.
        Used for testing purposes only.
    """
    if self._timeout is None:
        raise Exception("Thread will never exit. Use stop or specify timeout when starting it!")

    self._thread.join()
    self.stop()
Receive an event from connection def _did_receive_event(self, connection): """ Receive an event from connection """ if not self._is_running: return if connection.has_timeouted: return response = connection.response data = None if response.status_code != 200: pushcenter_logger.error("[NURESTPushCenter]: Connection failure [%s] %s" % (response.status_code, response.errors)) else: data = response.data if len(self._delegate_methods) > 0: for m in self._delegate_methods: try: m(data) except Exception as exc: pushcenter_logger.error("[NURESTPushCenter] Delegate method %s failed:\n%s" % (m, exc)) elif data: events = data['events'] self.nb_events_received += len(events) self.nb_push_received += 1 pushcenter_logger.info("[NURESTPushCenter] Received Push #%s (total=%s, latest=%s)\n%s" % (self.nb_push_received, self.nb_events_received, len(events), json.dumps(events, indent=4))) self._last_events.extend(events) if self._is_running: uuid = None if data and 'uuid' in data: uuid = data['uuid'] self._listen(uuid)
Listen for events, optionally resuming from the given connection uuid. def _listen(self, uuid=None, session=None):
    """ Listen for events, optionally resuming from the given connection uuid """
    if self.url is None:
        raise Exception("NURESTPushCenter needs to have a valid URL. please use setURL: before starting it.")

    events_url = "%s/events" % self.url
    if uuid:
        events_url = "%s?uuid=%s" % (events_url, uuid)

    request = NURESTRequest(method='GET', url=events_url)

    # Events are delivered through the _did_receive_event callback,
    # which re-arms the long poll with the latest uuid.
    connection = NURESTConnection(request=request, async=True, callback=self._did_receive_event, root_object=self._root_object)

    if self._timeout:
        if int(time()) - self._start_time >= self._timeout:
            pushcenter_logger.debug("[NURESTPushCenter] Timeout (timeout=%ss)." % self._timeout)
            return
        else:
            connection.timeout = self._timeout

    pushcenter_logger.info('Bambou Sending >>>>>>\n%s %s' % (request.method, request.url))
    # connection.ignore_request_idle = True
    connection.start()
Registers a new delegate callback. The prototype should be function(data), where data will be the decoded JSON push. Args: callback (function): method to trigger when the push center receives events def add_delegate(self, callback):
    """ Registers a new delegate callback.

        The prototype should be function(data), where data will be the
        decoded JSON push.

        Args:
            callback (function): method to trigger when the push center receives events
    """
    if callback in self._delegate_methods:
        return

    self._delegate_methods.append(callback)
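Putting the methods above together, a hedged sketch of driving the push center; the import path, the endpoint URL and the way the url attribute is assigned are assumptions:

from bambou import NURESTPushCenter  # assumed import path

def on_push(data):
    # data is the decoded JSON push, or None when the long poll failed
    if data:
        for event in data.get("events", []):
            print(event.get("type"), event.get("entityType"))

push_center = NURESTPushCenter()
push_center.url = "https://vsd.example.com:8443/nuage/api/v6"  # hypothetical endpoint
push_center.add_delegate(on_push)
push_center.start()
# ... application work happens here; events arrive on the push-center thread ...
push_center.stop()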