Unregisters a registered delegate function or a method.
Args:
callback(function): method to trigger when push center receives events
def remove_delegate(self, callback):
""" Unregisters a registered delegate function or a method.
Args:
callback(function): method to trigger when push center receives events
"""
if callback not in self._delegate_methods:
return
self._delegate_methods.remove(callback)
|
Reads the configuration file if any
def _read_config(cls):
""" Reads the configuration file if any
"""
cls._config_parser = configparser.ConfigParser()
cls._config_parser.read(cls._default_attribute_values_configuration_file_path)
|
Gets the default value of a given property for a given object.
These properties can be set in a config INI file looking like
.. code-block:: ini
[NUEntity]
default_behavior = THIS
speed = 1000
[NUOtherEntity]
attribute_name = a value
This will be used when creating a :class:`bambou.NURESTObject` when no parameter or data is provided
def get_default_attribute_value(cls, object_class, property_name, attr_type=str):
""" Gets the default value of a given property for a given object.
These properties can be set in a config INI file looking like
.. code-block:: ini
[NUEntity]
default_behavior = THIS
speed = 1000
[NUOtherEntity]
attribute_name = a value
This will be used when creating a :class:`bambou.NURESTObject` when no parameter or data is provided
"""
if not cls._default_attribute_values_configuration_file_path:
return None
if not cls._config_parser:
cls._read_config()
class_name = object_class.__name__
if not cls._config_parser.has_section(class_name):
return None
if not cls._config_parser.has_option(class_name, property_name):
return None
if sys.version_info < (3,):
integer_types = (int, long,)
else:
integer_types = (int,)
if attr_type in integer_types:
return cls._config_parser.getint(class_name, property_name)
elif attr_type is bool:
return cls._config_parser.getboolean(class_name, property_name)
else:
return cls._config_parser.get(class_name, property_name)
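A hypothetical usage sketch, assuming these classmethods live on a BambouConfig-style class and the INI file shown above has been registered as the default-attribute-values file:
class NUEntity(object):
    pass

speed = BambouConfig.get_default_attribute_value(NUEntity, 'speed', int)
# -> 1000, read with getint() because attr_type is int
behavior = BambouConfig.get_default_attribute_value(NUEntity, 'default_behavior')
# -> 'THIS', returned as a plain string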
|
Filter each resource separately using its own filter
def filter(self, request, queryset, view):
""" Filter each resource separately using its own filter """
summary_queryset = queryset
filtered_querysets = []
for queryset in summary_queryset.querysets:
filter_class = self._get_filter(queryset)
queryset = filter_class(request.query_params, queryset=queryset).qs
filtered_querysets.append(queryset)
summary_queryset.querysets = filtered_querysets
return summary_queryset
|
This function creates a standard form type from a simplified form.
>>> from datetime import date, datetime
>>> from pyws.functions.args import TypeFactory
>>> from pyws.functions.args import String, Integer, Float, Date, DateTime
>>> TypeFactory(str) == String
True
>>> TypeFactory(float) == Float
True
>>> TypeFactory(date) == Date
True
>>> TypeFactory(datetime) == DateTime
True
>>> from operator import attrgetter
>>> from pyws.functions.args import Dict
>>> dct = TypeFactory({0: 'HelloWorldDict', 'hello': str, 'world': int})
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> fields = sorted(dct.fields, key=attrgetter('name'))
>>> len(dct.fields)
2
>>> fields[0].name == 'hello'
True
>>> fields[0].type == String
True
>>> fields[1].name == 'world'
True
>>> fields[1].type == Integer
True
>>> from pyws.functions.args import List
>>> lst = TypeFactory([int])
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
def TypeFactory(type_):
"""
This function creates a standard form type from a simplified form.
>>> from datetime import date, datetime
>>> from pyws.functions.args import TypeFactory
>>> from pyws.functions.args import String, Integer, Float, Date, DateTime
>>> TypeFactory(str) == String
True
>>> TypeFactory(float) == Float
True
>>> TypeFactory(date) == Date
True
>>> TypeFactory(datetime) == DateTime
True
>>> from operator import attrgetter
>>> from pyws.functions.args import Dict
>>> dct = TypeFactory({0: 'HelloWorldDict', 'hello': str, 'world': int})
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> fields = sorted(dct.fields, key=attrgetter('name'))
>>> len(dct.fields)
2
>>> fields[0].name == 'hello'
True
>>> fields[0].type == String
True
>>> fields[1].name == 'world'
True
>>> fields[1].type == Integer
True
>>> from pyws.functions.args import List
>>> lst = TypeFactory([int])
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
"""
if isinstance(type_, type) and issubclass(type_, Type):
return type_
for x in __types__:
if x.represents(type_):
return x.get(type_)
raise UnknownType(type_)
|
Generate empty image in temporary file for testing
def dummy_image(filetype='gif'):
""" Generate empty image in temporary file for testing """
# 1x1px Transparent GIF
GIF = 'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7'
tmp_file = tempfile.NamedTemporaryFile(suffix='.%s' % filetype)
tmp_file.write(base64.b64decode(GIF))
tmp_file.flush()  # make sure the bytes are on disk before reopening by name
return open(tmp_file.name, 'rb')
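A quick check that the generated file really is a GIF (the base64 constant above decodes to a GIF89a header):
img = dummy_image()            # default file type: gif
assert img.read(6) == b'GIF89a'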
|
Gets time delta in microseconds.
Note: Do NOT use this function without keyword arguments.
It will become much harder to add extra time ranges later if positional arguments are used.
def utime_delta(days=0, hours=0, minutes=0, seconds=0):
"""Gets time delta in microseconds.
Note: Do NOT use this function without keyword arguments.
It will become much harder to add extra time ranges later if positional arguments are used.
"""
return (days * DAY) + (hours * HOUR) + (minutes * MINUTE) + (seconds * SECOND)
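A short sanity check, assuming the module defines the usual microsecond-based constants (these definitions are an assumption, not taken from the source):
SECOND = 10 ** 6               # assumed: one second in microseconds
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR

assert utime_delta(minutes=1, seconds=30) == 90 * SECOND
assert utime_delta(days=1) == 86400 * SECOND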
|
Executes specified function with timeout. Uses SIGALRM to interrupt it.
:type fn: function
:param fn: function to execute
:type args: tuple
:param args: function args
:type kwargs: dict
:param kwargs: function kwargs
:type timeout: float
:param timeout: timeout, seconds; 0 or None means no timeout
:type fail_if_no_timer: bool
:param fail_if_no_timer: fail if a timer is not available; normally it is available only in the
main thread
:type signal_type: signalnum
:param signal_type: type of signal to use (see signal module)
:type timer_type: signal.ITIMER_REAL, signal.ITIMER_VIRTUAL or signal.ITIMER_PROF
:param timer_type: type of timer to use (see signal module)
:type timeout_exception_cls: class
:param timeout_exception_cls: exception to throw in case of timeout
:return: fn call result.
def execute_with_timeout(
fn,
args=None,
kwargs=None,
timeout=None,
fail_if_no_timer=True,
signal_type=_default_signal_type,
timer_type=_default_timer_type,
timeout_exception_cls=TimeoutError,
):
"""
Executes specified function with timeout. Uses SIGALRM to interrupt it.
:type fn: function
:param fn: function to execute
:type args: tuple
:param args: function args
:type kwargs: dict
:param kwargs: function kwargs
:type timeout: float
:param timeout: timeout, seconds; 0 or None means no timeout
:type fail_if_no_timer: bool
:param fail_if_no_timer: fail if a timer is not available; normally it is available only in the
main thread
:type signal_type: signalnum
:param signal_type: type of signal to use (see signal module)
:type timer_type: signal.ITIMER_REAL, signal.ITIMER_VIRTUAL or signal.ITIMER_PROF
:param timer_type: type of timer to use (see signal module)
:type timeout_exception_cls: class
:param timeout_exception_cls: exception to throw in case of timeout
:return: fn call result.
"""
if args is None:
args = empty_tuple
if kwargs is None:
kwargs = empty_dict
if timeout is None or timeout == 0 or signal_type is None or timer_type is None:
return fn(*args, **kwargs)
def signal_handler(signum, frame):
raise timeout_exception_cls(inspection.get_function_call_str(fn, args, kwargs))
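# `none` below is assumed to be a module-level sentinel distinct from None:
# signal.signal() can itself return None, so plain None would be ambiguous here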
old_signal_handler = none
timer_is_set = False
try:
try:
old_signal_handler = signal.signal(signal_type, signal_handler)
signal.setitimer(timer_type, timeout)
timer_is_set = True
except ValueError:
if fail_if_no_timer:
raise NotSupportedError(
"Timer is not available; the code is probably invoked from outside the main "
"thread."
)
return fn(*args, **kwargs)
finally:
if timer_is_set:
signal.setitimer(timer_type, 0)
if old_signal_handler is not none:
signal.signal(signal_type, old_signal_handler)
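A minimal usage sketch (POSIX main thread only, since the implementation relies on signal.setitimer; TimeoutError is assumed to be the module's exception class used as the default):
import time

try:
    execute_with_timeout(time.sleep, args=(10,), timeout=0.5)
except TimeoutError:
    print("call timed out after 0.5s")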
|
Gets the very original function of a decorated one.
def get_original_fn(fn):
"""Gets the very original function of a decorated one."""
fn_type = type(fn)
if fn_type is classmethod or fn_type is staticmethod:
return get_original_fn(fn.__func__)
if hasattr(fn, "original_fn"):
return fn.original_fn
if hasattr(fn, "fn"):
fn.original_fn = get_original_fn(fn.fn)
return fn.original_fn
return fn
|
Gets full class or function name.
def get_full_name(src):
"""Gets full class or function name."""
if hasattr(src, "_full_name_"):
return src._full_name_
if hasattr(src, "is_decorator"):
# Our own decorator or binder
if hasattr(src, "decorator"):
# Our own binder
_full_name_ = str(src.decorator)
# It's a short-living object, so we don't cache result
else:
# Our own decorator
_full_name_ = str(src)
try:
src._full_name_ = _full_name_
except AttributeError:
pass
except TypeError:
pass
elif hasattr(src, "im_class"):
# Bound method
cls = src.im_class
_full_name_ = get_full_name(cls) + "." + src.__name__
# It's a short-living object, so we don't cache result
elif hasattr(src, "__module__") and hasattr(src, "__name__"):
# Func or class
_full_name_ = (
("<unknown module>" if src.__module__ is None else src.__module__)
+ "."
+ src.__name__
)
try:
src._full_name_ = _full_name_
except AttributeError:
pass
except TypeError:
pass
else:
# Something else
_full_name_ = str(get_original_fn(src))
return _full_name_
|
Converts method call (function and its arguments) to a str(...)-like string.
def get_function_call_str(fn, args, kwargs):
"""Converts method call (function and its arguments) to a str(...)-like string."""
def str_converter(v):
try:
return str(v)
except Exception:
try:
return repr(v)
except Exception:
return "<n/a str raised>"
result = get_full_name(fn) + "("
first = True
for v in args:
if first:
first = False
else:
result += ","
result += str_converter(v)
for k, v in kwargs.items():
if first:
first = False
else:
result += ","
result += str(k) + "=" + str_converter(v)
result += ")"
return result
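For example, on a plain module-level function (the module prefix in the output depends on where the function is defined):
def greet(name, punctuation="!"):
    return "hello " + name + punctuation

get_function_call_str(greet, ("world",), {"punctuation": "?"})
# -> '__main__.greet(world,punctuation=?)'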
|
Converts method call (function and its arguments) to a repr(...)-like string.
def get_function_call_repr(fn, args, kwargs):
"""Converts method call (function and its arguments) to a repr(...)-like string."""
result = get_full_name(fn) + "("
first = True
for v in args:
if first:
first = False
else:
result += ","
result += repr(v)
for k, v in kwargs.items():
if first:
first = False
else:
result += ","
result += str(k) + "=" + repr(v)
result += ")"
return result
|
Variation of inspect.getargspec that works for more functions.
This function works for Cythonized, non-cpdef functions, which expose argspec information but
are not accepted by getargspec. It also works for Python 3 functions that use annotations, which
are simply ignored. However, keyword-only arguments are not supported.
def getargspec(func):
"""Variation of inspect.getargspec that works for more functions.
This function works for Cythonized, non-cpdef functions, which expose argspec information but
are not accepted by getargspec. It also works for Python 3 functions that use annotations, which
are simply ignored. However, keyword-only arguments are not supported.
"""
if inspect.ismethod(func):
func = func.__func__
# Cythonized functions have a .__code__, but don't pass inspect.isfunction()
try:
code = func.__code__
except AttributeError:
raise TypeError("{!r} is not a Python function".format(func))
if hasattr(code, "co_kwonlyargcount") and code.co_kwonlyargcount > 0:
raise ValueError("keyword-only arguments are not supported by getargspec()")
args, varargs, varkw = inspect.getargs(code)
return inspect.ArgSpec(args, varargs, varkw, func.__defaults__)
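A quick check of the behavior on a plain function:
def f(a, b=1, *args, **kwargs):
    pass

spec = getargspec(f)
# -> ArgSpec(args=['a', 'b'], varargs='args', keywords='kwargs', defaults=(1,))
assert spec.args == ['a', 'b'] and spec.defaults == (1,)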
|
Returns whether this function is either a generator function or a Cythonized function.
def is_cython_or_generator(fn):
"""Returns whether this function is either a generator function or a Cythonized function."""
if hasattr(fn, "__func__"):
fn = fn.__func__ # Class method, static method
if inspect.isgeneratorfunction(fn):
return True
name = type(fn).__name__
return (
name == "generator"
or name == "method_descriptor"
or name == "cython_function_or_method"
or name == "builtin_function_or_method"
)
|
Checks if a function is compiled w/Cython.
def is_cython_function(fn):
"""Checks if a function is compiled w/Cython."""
if hasattr(fn, "__func__"):
fn = fn.__func__ # Class method, static method
name = type(fn).__name__
return (
name == "method_descriptor"
or name == "cython_function_or_method"
or name == "builtin_function_or_method"
)
|
Returns whether f is a classmethod.
def is_classmethod(fn):
"""Returns whether f is a classmethod."""
# This is True for bound methods
if not inspect.ismethod(fn):
return False
if not hasattr(fn, "__self__"):
return False
im_self = fn.__self__
# In Python 2 this is None for unbound methods accessed on classes,
# and the instance itself for bound instance methods on instances.
if im_self is None:
return False
# This is True for class methods of new- and old-style classes, respectively
return isinstance(im_self, six.class_types)
|
Cython-compatible functools.wraps implementation.
def wraps(
wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES
):
"""Cython-compatible functools.wraps implementation."""
if not is_cython_function(wrapped):
return functools.wraps(wrapped, assigned, updated)
else:
return lambda wrapper: wrapper
|
Returns all subclasses (direct and recursive) of cls.
def get_subclass_tree(cls, ensure_unique=True):
"""Returns all subclasses (direct and recursive) of cls."""
subclasses = []
# cls.__subclasses__() fails on classes inheriting from type
for subcls in type.__subclasses__(cls):
subclasses.append(subcls)
subclasses.extend(get_subclass_tree(subcls, ensure_unique))
return list(set(subclasses)) if ensure_unique else subclasses
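For example, with a small hierarchy:
class Base(object):
    pass

class Child(Base):
    pass

class GrandChild(Child):
    pass

assert set(get_subclass_tree(Base)) == {Child, GrandChild}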
|
This function creates a dict type with the specified name and fields.
>>> from pyws.functions.args import DictOf, Field
>>> dct = DictOf(
... 'HelloWorldDict', Field('hello', str), Field('world', int))
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> len(dct.fields)
2
def DictOf(name, *fields):
"""
This function creates a dict type with the specified name and fields.
>>> from pyws.functions.args import DictOf, Field
>>> dct = DictOf(
... 'HelloWorldDict', Field('hello', str), Field('world', int))
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> len(dct.fields)
2
"""
ret = type(name, (Dict,), {'fields': []})
#noinspection PyUnresolvedReferences
ret.add_fields(*fields)
return ret
|
This function creates a list type with element type ``element_type`` and an
empty element value ``element_none_value``.
>>> from pyws.functions.args import Integer, ListOf
>>> lst = ListOf(int)
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
def ListOf(element_type, element_none_value=None):
"""
This function creates a list type with element type ``element_type`` and an
empty element value ``element_none_value``.
>>> from pyws.functions.args import Integer, ListOf
>>> lst = ListOf(int)
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
"""
from pyws.functions.args.types import TypeFactory
element_type = TypeFactory(element_type)
return type(element_type.__name__ + 'List', (List,), {
'element_type': element_type,
'element_none_value': element_none_value})
|
Return metadata for resource-specific actions,
such as start, stop, unlink
def get_actions(self, request, view):
"""
Return metadata for resource-specific actions,
such as start, stop, unlink
"""
metadata = OrderedDict()
actions = self.get_resource_actions(view)
resource = view.get_object()
for action_name, action in actions.items():
if action_name == 'update':
view.request = clone_request(request, 'PUT')
else:
view.action = action_name
data = ActionSerializer(action, action_name, request, view, resource)
metadata[action_name] = data.serialize()
if not metadata[action_name]['enabled']:
continue
fields = self.get_action_fields(view, action_name, resource)
if not fields:
metadata[action_name]['type'] = 'button'
else:
metadata[action_name]['type'] = 'form'
metadata[action_name]['fields'] = fields
view.action = None
view.request = request
return metadata
|
Get fields exposed by action's serializer
def get_action_fields(self, view, action_name, resource):
"""
Get fields exposed by action's serializer
"""
serializer = view.get_serializer(resource)
fields = OrderedDict()
if not isinstance(serializer, view.serializer_class) or action_name == 'update':
fields = self.get_fields(serializer.fields)
return fields
|
Given an instance of a serializer, return a dictionary of metadata
about its fields.
def get_serializer_info(self, serializer):
"""
Given an instance of a serializer, return a dictionary of metadata
about its fields.
"""
if hasattr(serializer, 'child'):
# If this is a `ListSerializer` then we want to examine the
# underlying child serializer instance instead.
serializer = serializer.child
return self.get_fields(serializer.fields)
|
Get fields metadata skipping empty fields
def get_fields(self, serializer_fields):
"""
Get fields metadata skipping empty fields
"""
fields = OrderedDict()
for field_name, field in serializer_fields.items():
# Skip tags field in action because it is needed only for resource creation
# See also: WAL-1223
if field_name == 'tags':
continue
info = self.get_field_info(field, field_name)
if info:
fields[field_name] = info
return fields
|
Given an instance of a serializer field, return a dictionary
of metadata about it.
def get_field_info(self, field, field_name):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
attrs = [
'label', 'help_text', 'default_value', 'placeholder', 'required',
'min_length', 'max_length', 'min_value', 'max_value', 'many'
]
if getattr(field, 'read_only', False):
return None
for attr in attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
if 'label' not in field_info:
field_info['label'] = field_name.replace('_', ' ').title()
if hasattr(field, 'view_name'):
list_view = field.view_name.replace('-detail', '-list')
base_url = reverse(list_view, request=self.request)
field_info['type'] = 'select'
field_info['url'] = base_url
if hasattr(field, 'query_params'):
field_info['url'] += '?%s' % urlencode(field.query_params)
field_info['value_field'] = getattr(field, 'value_field', 'url')
field_info['display_name_field'] = getattr(field, 'display_name_field', 'display_name')
if hasattr(field, 'choices') and not hasattr(field, 'queryset'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
return field_info
|
Wraps the object produced by the async connect coroutine in an async context manager.
def connect(
database: str,
loop: asyncio.BaseEventLoop = None,
executor: concurrent.futures.Executor = None,
timeout: int = 5,
echo: bool = False,
isolation_level: str = '',
check_same_thread: bool = False,
**kwargs: dict
):
"""
Wraps the object produced by the async connect coroutine in an async context manager.
"""
coro = _connect(
database,
loop=loop,
executor=executor,
timeout=timeout,
echo=echo,
isolation_level=isolation_level,
check_same_thread=check_same_thread,
**kwargs
)
return _ContextManager(coro)
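A hedged usage sketch; the _ContextManager wrapper is what enables the async with form below (the cursor API is assumed from the aiosqlite-style signature, not confirmed by the source):
import asyncio

async def main():
    # 'example.db' is an illustrative path
    async with connect('example.db') as conn:
        await conn.execute('SELECT 1')

asyncio.get_event_loop().run_until_complete(main())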
|
Recalculate price of consumables that were used by resource until now.
Regular task. It is too expensive to calculate consumed price on each
request, so we store cached price each hour.
If recalculate_total is True, the task also recalculates the total estimate
for the current month.
def recalculate_estimate(recalculate_total=False):
""" Recalculate price of consumables that were used by resource until now.
Regular task. It is too expensive to calculate consumed price on each
request, so we store cached price each hour.
If recalculate_total is True, the task also recalculates the total estimate
for the current month.
"""
# Celery does not import server.urls and does not discover cost tracking modules.
# So they should be discovered implicitly.
CostTrackingRegister.autodiscover()
# Step 1. Recalculate resources estimates.
for resource_model in CostTrackingRegister.registered_resources:
for resource in resource_model.objects.all():
_update_resource_consumed(resource, recalculate_total=recalculate_total)
# Step 2. Move from down to top and recalculate consumed estimate for each
# object based on its children.
ancestors_models = [m for m in models.PriceEstimate.get_estimated_models()
if not issubclass(m, structure_models.ResourceMixin)]
for model in ancestors_models:
for ancestor in model.objects.all():
_update_ancestor_consumed(ancestor)
|
Needs to return the string source for the module.
def get_data(self, path):
"""Needs to return the string source for the module."""
return LineCacheNotebookDecoder(
code=self.code, raw=self.raw, markdown=self.markdown
).decode(self.decode(), self.path)
|
Create a lazy loader source file loader.
def loader(self):
"""Create a lazy loader source file loader."""
loader = super().loader
if self._lazy and (sys.version_info.major, sys.version_info.minor) != (3, 4):
loader = LazyLoader.factory(loader)
# Strip the leading underscore from slots
return partial(
loader, **{object.lstrip("_"): getattr(self, object) for object in self.__slots__}
)
|
Import a notebook as a module from a filename.
dir: The directory to load the file from.
main: Load the module in the __main__ context.
> assert Notebook.load('loader.ipynb')
def load(cls, filename, dir=None, main=False, **kwargs):
"""Import a notebook as a module from a filename.
dir: The directory to load the file from.
main: Load the module in the __main__ context.
> assert Notebook.load('loader.ipynb')
"""
name = main and "__main__" or Path(filename).stem
loader = cls(name, str(filename), **kwargs)
module = module_from_spec(FileModuleSpec(name, loader, origin=loader.path))
cwd = str(Path(loader.path).parent)
try:
with ExitStack() as stack:
sys.path.append(cwd)
loader.name != "__main__" and stack.enter_context(_installed_safely(module))
loader.exec_module(module)
finally:
sys.path.pop()
return module
|
* Convert the current source to ast
* Apply ast transformers.
* Compile the code.
def source_to_code(self, nodes, path, *, _optimize=-1):
"""* Convert the current source to ast
* Apply ast transformers.
* Compile the code."""
if not isinstance(nodes, ast.Module):
nodes = ast.parse(nodes, self.path)
if self._markdown_docstring:
nodes = update_docstring(nodes)
return super().source_to_code(
ast.fix_missing_locations(self.visit(nodes)), path, _optimize=_optimize
)
|
Inject extra action URLs.
def get_urls(self):
"""
Inject extra action URLs.
"""
urls = []
for action in self.get_extra_actions():
regex = r'^{}/$'.format(self._get_action_href(action))
view = self.admin_site.admin_view(action)
urls.append(url(regex, view))
return urls + super(ExtraActionsMixin, self).get_urls()
|
Inject extra links into template context.
def changelist_view(self, request, extra_context=None):
"""
Inject extra links into template context.
"""
links = []
for action in self.get_extra_actions():
links.append({
'label': self._get_action_label(action),
'href': self._get_action_href(action)
})
extra_context = extra_context or {}
extra_context['extra_links'] = links
return super(ExtraActionsMixin, self).changelist_view(
request, extra_context=extra_context,
)
|
Starts the session.
Starting the session will actually get the API key of the current user
def start(self):
"""
Starts the session.
Starting the session will actually get the API key of the current user
"""
if NURESTSession.session_stack:
bambou_logger.critical("Starting a session inside a with statement is not supported.")
raise Exception("Starting a session inside a with statement is not supported.")
NURESTSession.current_session = self
self._authenticate()
return self
|
Initialize new instances quotas
def init_quotas(sender, instance, created=False, **kwargs):
""" Initialize new instances quotas """
if not created:
return
for field in sender.get_quotas_fields():
try:
field.get_or_create_quota(scope=instance)
except CreationConditionFailedQuotaError:
pass
|
Creates handler that will recalculate count_quota on creation/deletion
def count_quota_handler_factory(count_quota_field):
""" Creates handler that will recalculate count_quota on creation/deletion """
def recalculate_count_quota(sender, instance, **kwargs):
signal = kwargs['signal']
if signal == signals.post_save and kwargs.get('created'):
count_quota_field.add_usage(instance, delta=1)
elif signal == signals.post_delete:
count_quota_field.add_usage(instance, delta=-1, fail_silently=True)
return recalculate_count_quota
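A sketch of how such a factory is typically wired to Django signals; count_quota_field and Project are illustrative names, not taken from the source:
from django.db.models import signals

handler = count_quota_handler_factory(count_quota_field)
# weak=False keeps the closure alive; Django holds receivers by weak reference
signals.post_save.connect(handler, sender=Project, weak=False)
signals.post_delete.connect(handler, sender=Project, weak=False)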
|
Call aggregated quotas fields update methods
def handle_aggregated_quotas(sender, instance, **kwargs):
""" Call aggregated quotas fields update methods """
quota = instance
# aggregation is not supported for global quotas.
if quota.scope is None:
return
quota_field = quota.get_field()
# usage aggregation should not count another usage aggregator field to avoid calls duplication.
if isinstance(quota_field, fields.UsageAggregatorQuotaField) or quota_field is None:
return
signal = kwargs['signal']
for aggregator_quota in quota_field.get_aggregator_quotas(quota):
field = aggregator_quota.get_field()
if signal == signals.post_save:
field.post_child_quota_save(aggregator_quota.scope, child_quota=quota, created=kwargs.get('created'))
elif signal == signals.pre_delete:
field.pre_child_quota_delete(aggregator_quota.scope, child_quota=quota)
|
URL of service settings
def get_settings(self, link):
"""
URL of service settings
"""
return reverse(
'servicesettings-detail', kwargs={'uuid': link.service.settings.uuid}, request=self.context['request'])
|
URL of service
def get_url(self, link):
"""
URL of service
"""
view_name = SupportedServices.get_detail_view_for_model(link.service)
return reverse(view_name, kwargs={'uuid': link.service.uuid.hex}, request=self.context['request'])
|
Count total number of all resources connected to link
def get_resources_count(self, link):
"""
Count total number of all resources connected to link
"""
total = 0
for model in SupportedServices.get_service_resources(link.service):
# Format query path from resource to service project link
query = {model.Permissions.project_path.split('__')[0]: link}
total += model.objects.filter(**query).count()
return total
|
When max_na_values is given, drop columns whose proportion of NA values
exceeds the max_na_values threshold.
When max_unique_values is given, drop columns whose proportion of unique
values exceeds the max_unique_values threshold; this applies only to
columns of object or category dtype.
:param max_na_values: proportion threshold of max NA values
:param max_unique_values: proportion threshold of max unique values
:return:
def drop_columns(
self, max_na_values: int = None, max_unique_values: int = None
):
"""
When max_na_values is given, drop columns whose proportion of NA values
exceeds the max_na_values threshold.
When max_unique_values is given, drop columns whose proportion of unique
values exceeds the max_unique_values threshold; this applies only to
columns of object or category dtype.
:param max_na_values: proportion threshold of max NA values
:param max_unique_values: proportion threshold of max unique values
:return:
"""
steps = []
if max_na_values is not None:
steps.append({
'data-set': self.iid,
'operation': 'drop-na',
'expression': '{"max_na_values":%s, "axis": 1}' % max_na_values
})
if max_unique_values is not None:
steps.append({
'data-set': self.iid,
'operation': 'drop-unique',
'expression': '{"max_unique_values":%s}' % max_unique_values
})
self.attr_update(attr='steps', value=steps)
|
Drop rows that contain NA values (axis 0).
:return:
def dropna(self):
"""
Drop rows that contain NA values (axis 0).
:return:
"""
step = {
'data-set': self.iid,
'operation': 'drop-na',
'expression': '{"axis": 0}'
}
self.attr_update(attr='steps', value=[step])
|
@deprecated
:param message:
:return:
def log(self, message: str):
"""
@deprecated
:param message:
:return:
"""
dset_log_id = '_%s_log' % self.iid
if dset_log_id not in self.parent.data.keys():
dset = self.parent.data.create_dataset(
dset_log_id, shape=(1,),
dtype=np.dtype([
('dt_log', '<i8'),
('message', 'S250')
])
)
else:
dset = self.parent.data[dset_log_id]
timestamp = np.array(
datetime.now().strftime("%s")
).astype('<i8').view('<M8[s]')
dset['dt_log'] = timestamp.view('<i8')
dset['message'] = message
self.parent.data.flush()
|
:param compute: whether to force a call to the compute method
:return:
def summary(self, compute=False) -> pd.DataFrame:
"""
:param compute: whether to force a call to the compute method
:return:
"""
if compute or self.result is None:
self.compute()
return summary(self.result)
|
Twisted Web adapter. It has two arguments:
#. ``request`` is a Twisted Web request object,
#. ``server`` is a pyws server object.
First one is the context of an application, function ``serve`` transforms
it into a pyws request object. Then it feeds the request to the server,
gets the response, sets header ``Content-Type`` and returns response text.
def serve(request, server):
"""
Twisted Web adapter. It has two arguments:
#. ``request`` is a Twisted Web request object,
#. ``server`` is a pyws server object.
First one is the context of an application, function ``serve`` transforms
it into a pyws request object. Then it feeds the request to the server,
gets the response, sets header ``Content-Type`` and returns response text.
"""
request_ = Request('/'.join(request.postpath),
request.content.read() if not request.method == 'GET' else '',
request.args, request.args, {})
response = server.process_request(request_)
request.setHeader('Content-Type', response.content_type)
request.setResponseCode(get_http_response_code_num(response))
return response.text
|
Returns list of application models in topological order.
It is used in order to correctly delete dependent resources.
def get_sorted_dependencies(service_model):
"""
Returns list of application models in topological order.
It is used in order to correctly delete dependent resources.
"""
app_models = list(service_model._meta.app_config.get_models())
dependencies = {model: set() for model in app_models}
relations = (
relation
for model in app_models
for relation in model._meta.related_objects
if relation.on_delete in (models.PROTECT, models.CASCADE)
)
for rel in relations:
dependencies[rel.model].add(rel.related_model)
return stable_topological_sort(app_models, dependencies)
|
Update instance fields based on data imported from the backend.
Save changes to the DB only if one or more fields were changed.
def update_pulled_fields(instance, imported_instance, fields):
"""
Update instance fields based on data imported from the backend.
Save changes to the DB only if one or more fields were changed.
"""
modified = False
for field in fields:
pulled_value = getattr(imported_instance, field)
current_value = getattr(instance, field)
if current_value != pulled_value:
setattr(instance, field, pulled_value)
logger.info("%s's with PK %s %s field updated from value '%s' to value '%s'",
instance.__class__.__name__, instance.pk, field, current_value, pulled_value)
modified = True
error_message = getattr(imported_instance, 'error_message', '') or getattr(instance, 'error_message', '')
if error_message and instance.error_message != error_message:
instance.error_message = imported_instance.error_message
modified = True
if modified:
instance.save()
|
Set resource state to ERRED and append/create "not found" error message.
def handle_resource_not_found(resource):
"""
Set resource state to ERRED and append/create "not found" error message.
"""
resource.set_erred()
resource.runtime_state = ''
message = 'Does not exist at backend.'
if message not in resource.error_message:
if not resource.error_message:
resource.error_message = message
else:
resource.error_message += ' (%s)' % message
resource.save()
logger.warning('%s %s (PK: %s) does not exist at backend.' % (
resource.__class__.__name__, resource, resource.pk))
|
Recover resource if its state is ERRED and clear error message.
def handle_resource_update_success(resource):
"""
Recover resource if its state is ERRED and clear error message.
"""
update_fields = []
if resource.state == resource.States.ERRED:
resource.recover()
update_fields.append('state')
if resource.state in (resource.States.UPDATING, resource.States.CREATING):
resource.set_ok()
update_fields.append('state')
if resource.error_message:
resource.error_message = ''
update_fields.append('error_message')
if update_fields:
resource.save(update_fields=update_fields)
logger.warning('%s %s (PK: %s) was successfully updated.' % (
resource.__class__.__name__, resource, resource.pk))
|
Set header value
def set_header(self, header, value):
""" Set header value """
# requests>=2.11 only accepts `str` or `bytes` header values
# raising an exception here, instead of leaving it to `requests` makes
# it easy to know where we passed a wrong header type in the code.
if not isinstance(value, (str, bytes)):
raise TypeError("header values must be str or bytes, but %s value has type %s" % (header, type(value)))
self._headers[header] = value
|
Mark instance as erred and save error message
def set_instance_erred(self, instance, error_message):
""" Mark instance as erred and save error message """
instance.set_erred()
instance.error_message = error_message
instance.save(update_fields=['state', 'error_message'])
|
Map a language code or name to an ISO 639 entry (ISO 639-3 via iso639 when dash3 is True, otherwise via pycountry).
def map_language(language, dash3=True):
""" Use ISO 639-3 ?? """
if dash3:
from iso639 import languages
else:
from pycountry import languages
if '_' in language:
language = language.split('_')[0]
if len(language) == 2:
try: return languages.get(alpha2=language.lower())
except KeyError: pass
elif len(language) == 3:
if dash3:
try: return languages.get(part3=language.lower())
except KeyError: pass
try: return languages.get(terminology=language.lower())
except KeyError: pass
try: return languages.get(bibliographic=language.lower())
except KeyError: pass
else:
try: return languages.get(name=language.title())
except KeyError: pass
if dash3:
try: return languages.get(inverted=language.title())
except KeyError: pass
for l in re.split('[,.;: ]+', language):
try: return languages.get(name=l.title())
except KeyError: pass
|
Delete each resource using specific executor.
Convert executors to tasks and combine all deletion tasks into a single sequential task.
def get_task_signature(cls, instance, serialized_instance, **kwargs):
"""
Delete each resource using specific executor.
Convert executors to tasks and combine all deletion tasks into a single sequential task.
"""
cleanup_tasks = [
ProjectResourceCleanupTask().si(
core_utils.serialize_class(executor_cls),
core_utils.serialize_class(model_cls),
serialized_instance,
)
for (model_cls, executor_cls) in cls.executors
]
if not cleanup_tasks:
return core_tasks.EmptyTask()
return chain(cleanup_tasks)
|
Return an OrderedDict ordered by key names from the :unsorted_dict:
def sort_dict(unsorted_dict):
"""
Return an OrderedDict ordered by key names from the :unsorted_dict:
"""
sorted_dict = OrderedDict()
# sort items before inserting them into a dict
for key, value in sorted(unsorted_dict.items(), key=itemgetter(0)):
sorted_dict[key] = value
return sorted_dict
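For example:
>>> sort_dict({'beta': 2, 'alpha': 1, 'gamma': 3})
OrderedDict([('alpha', 1), ('beta', 2), ('gamma', 3)])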
|
Format time_and_value_list to time segments
Parameters
^^^^^^^^^^
time_and_value_list: list of tuples
Has to be sorted by time
Example: [(time, value), (time, value) ...]
segments_count: integer
How many segments will be in the result
start_timestamp: integer
Start of the overall time frame
end_timestamp: integer
End of the overall time frame
average: boolean
If True, divide each segment value by the number of points in it
Returns
^^^^^^^
List of dictionaries
Example:
[{'from': time1, 'to': time2, 'value': sum_of_values_from_time1_to_time2}, ...]
def format_time_and_value_to_segment_list(time_and_value_list, segments_count, start_timestamp,
end_timestamp, average=False):
"""
Format time_and_value_list to time segments
Parameters
^^^^^^^^^^
time_and_value_list: list of tuples
Has to be sorted by time
Example: [(time, value), (time, value) ...]
segments_count: integer
How many segments will be in the result
start_timestamp: integer
Start of the overall time frame
end_timestamp: integer
End of the overall time frame
average: boolean
If True, divide each segment value by the number of points in it
Returns
^^^^^^^
List of dictionaries
Example:
[{'from': time1, 'to': time2, 'value': sum_of_values_from_time1_to_time2}, ...]
"""
segment_list = []
time_step = (end_timestamp - start_timestamp) / segments_count
for i in range(segments_count):
segment_start_timestamp = start_timestamp + time_step * i
segment_end_timestamp = segment_start_timestamp + time_step
value_list = [
value for time, value in time_and_value_list
if time >= segment_start_timestamp and time < segment_end_timestamp]
segment_value = sum(value_list)
if average and len(value_list) != 0:
segment_value /= len(value_list)
segment_list.append({
'from': segment_start_timestamp,
'to': segment_end_timestamp,
'value': segment_value,
})
return segment_list
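A worked example with two segments over the interval [0, 20): time_step is 10, so the points at times 0 and 5 fall into the first segment and the point at 15 into the second:
points = [(0, 1), (5, 2), (15, 4)]
segments = format_time_and_value_to_segment_list(points, 2, 0, 20)
# first segment covers [0, 10): 1 + 2 == 3; second covers [10, 20): 4
assert [s['value'] for s in segments] == [3, 4]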
|
Serialize Django model instance
def serialize_instance(instance):
""" Serialize Django model instance """
model_name = force_text(instance._meta)
return '{}:{}'.format(model_name, instance.pk)
|
Deserialize Django model instance
def deserialize_instance(serialized_instance):
""" Deserialize Django model instance """
model_name, pk = serialized_instance.split(':')
model = apps.get_model(model_name)
return model._default_manager.get(pk=pk)
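Round trip with an illustrative Django model instance (user is assumed to be a saved auth User):
key = serialize_instance(user)          # e.g. 'auth.user:1'
assert deserialize_instance(key).pk == user.pk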
|
Deserialize Python class
def deserialize_class(serialized_cls):
""" Deserialize Python class """
module_name, cls_name = serialized_cls.split(':')
module = importlib.import_module(module_name)
return getattr(module, cls_name)
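For example, using the 'module:ClassName' format the function expects:
>>> deserialize_class('collections:OrderedDict')
<class 'collections.OrderedDict'>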
|
Restore instance from URL
def instance_from_url(url, user=None):
""" Restore instance from URL """
# XXX: This circular dependency will be removed then filter_queryset_for_user
# will be moved to model manager method
from waldur_core.structure.managers import filter_queryset_for_user
url = clear_url(url)
match = resolve(url)
model = get_model_from_resolve_match(match)
queryset = model.objects.all()
if user is not None:
queryset = filter_queryset_for_user(model.objects.all(), user)
return queryset.get(**match.kwargs)
|
Close this transaction.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will rollback(). Otherwise, the
method returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
def close(self):
"""
Close this transaction.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will rollback(). Otherwise, the
method returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if not self._connection or not self._parent:
return
if not self._parent._is_active:
# pragma: no cover
self._connection = None
# self._parent = None
return
if self._parent is self:
yield from self.rollback()
else:
self._is_active = False
self._connection = None
self._parent = None
|
Commit this transaction.
def commit(self):
"""
Commit this transaction.
"""
if not self._parent._is_active:
raise exc.InvalidRequestError("This transaction is inactive")
yield from self._do_commit()
self._is_active = False
|
Returns an app config for the given name, not by label.
def _get_app_config(self, app_name):
"""
Returns an app config for the given name, not by label.
"""
matches = [app_config for app_config in apps.get_app_configs()
if app_config.name == app_name]
if not matches:
return
return matches[0]
|
Some plugins ship multiple applications and extensions.
However all of them have the same version, because they are released together.
That's why only the top-level module is used to fetch version information.
def _get_app_version(self, app_config):
"""
Some plugins ship multiple applications and extensions.
However all of them have the same version, because they are released together.
That's why only the top-level module is used to fetch version information.
"""
base_name = app_config.__module__.split('.')[0]
module = __import__(base_name)
return getattr(module, '__version__', 'N/A')
|
Returns a list of ListLink items to be added to Quick Access tab.
Contains:
- links to Organizations, Projects and Users;
- a link to shared service settings;
- custom configured links in admin/settings FLUENT_DASHBOARD_QUICK_ACCESS_LINKS attribute;
def _get_quick_access_info(self):
"""
Returns a list of ListLink items to be added to Quick Access tab.
Contains:
- links to Organizations, Projects and Users;
- a link to shared service settings;
- custom configured links in admin/settings FLUENT_DASHBOARD_QUICK_ACCESS_LINKS attribute;
"""
quick_access_links = []
# add custom links
quick_access_links.extend(settings.FLUENT_DASHBOARD_QUICK_ACCESS_LINKS)
for model in (structure_models.Project,
structure_models.Customer,
core_models.User,
structure_models.SharedServiceSettings):
quick_access_links.append(self._get_link_to_model(model))
return quick_access_links
|
Returns a LinkList based module which contains links to shared service settings instances in the ERRED state.
def _get_erred_shared_settings_module(self):
"""
Returns a LinkList based module which contains links to shared service settings instances in the ERRED state.
"""
result_module = modules.LinkList(title=_('Shared provider settings in erred state'))
result_module.template = 'admin/dashboard/erred_link_list.html'
erred_state = structure_models.SharedServiceSettings.States.ERRED
queryset = structure_models.SharedServiceSettings.objects
settings_in_erred_state = queryset.filter(state=erred_state).count()
if settings_in_erred_state:
result_module.title = '%s (%s)' % (result_module.title, settings_in_erred_state)
for service_settings in queryset.filter(state=erred_state).iterator():
module_child = self._get_link_to_instance(service_settings)
module_child['error'] = service_settings.error_message
result_module.children.append(module_child)
else:
result_module.pre_content = _('Nothing found.')
return result_module
|
Returns a list of links to resources which are in ERRED state and linked to a shared service settings.
def _get_erred_resources_module(self):
"""
Returns a list of links to resources which are in ERRED state and linked to a shared service settings.
"""
result_module = modules.LinkList(title=_('Resources in erred state'))
erred_state = structure_models.NewResource.States.ERRED
children = []
resource_models = SupportedServices.get_resource_models()
resources_in_erred_state_overall = 0
for resource_type, resource_model in resource_models.items():
queryset = resource_model.objects.filter(service_project_link__service__settings__shared=True)
erred_amount = queryset.filter(state=erred_state).count()
if erred_amount:
resources_in_erred_state_overall = resources_in_erred_state_overall + erred_amount
link = self._get_erred_resource_link(resource_model, erred_amount, erred_state)
children.append(link)
if resources_in_erred_state_overall:
result_module.title = '%s (%s)' % (result_module.title, resources_in_erred_state_overall)
result_module.children = children
else:
result_module.pre_content = _('Nothing found.')
return result_module
|
Django adapter. It has three arguments:
#. ``request`` is a Django request object,
#. ``tail`` is everything that's left of the URL the adapter is
attached to,
#. ``server`` is a pyws server object.
First two are the context of an application, function ``serve`` transforms
them into a pyws request object. Then it feeds the request to the server,
gets the response and transforms it into a Django response object.
def serve(request, tail, server):
"""
Django adapter. It has three arguments:
#. ``request`` is a Django request object,
#. ``tail`` is everything that's left of the URL the adapter is
attached to,
#. ``server`` is a pyws server object.
First two are the context of an application, function ``serve`` transforms
them into a pyws request object. Then it feeds the request to the server,
gets the response and transforms it into a Django response object.
"""
if request.GET:
body = ''
else:
try:
body = request.body
except AttributeError:
body = request.raw_post_data
request = Request(
tail,
body,
parse_qs(request.META['QUERY_STRING']),
parse_qs(body),
request.COOKIES,
)
response = server.process_request(request)
return HttpResponse(
response.text, content_type=response.content_type,
status=get_http_response_code_num(response))
|
Get index of the given item
Args:
nurest_object (bambou.NURESTObject): the NURESTObject object to verify
Returns:
Returns the position of the object.
Raises:
Raise a ValueError exception if object is not present
def index(self, nurest_object):
""" Get index of the given item
Args:
nurest_object (bambou.NURESTObject): the NURESTObject object to verify
Returns:
Returns the position of the object.
Raises:
Raise a ValueError exception if object is not present
"""
for index, obj in enumerate(self):
if obj.equals(nurest_object):
return index
raise ValueError("%s is not in %s" % (nurest_object, self))
|
Register the fetcher for a served object.
This method will fill the fetcher with `managed_class` instances
Args:
parent_object: the instance of the parent object to serve
Returns:
It returns the fetcher instance.
def fetcher_with_object(cls, parent_object, relationship="child"):
""" Register the fetcher for a served object.
This method will fill the fetcher with `managed_class` instances
Args:
parent_object: the instance of the parent object to serve
Returns:
It returns the fetcher instance.
"""
fetcher = cls()
fetcher.parent_object = parent_object
fetcher.relationship = relationship
rest_name = cls.managed_object_rest_name()
parent_object.register_fetcher(fetcher, rest_name)
return fetcher
|
Prepare headers for the given request
Args:
request: the NURESTRequest to send
filter: string
order_by: string
group_by: list of names
page: int
page_size: int
def _prepare_headers(self, request, filter=None, order_by=None, group_by=[], page=None, page_size=None):
""" Prepare headers for the given request
Args:
request: the NURESTRequest to send
filter: string
order_by: string
group_by: list of names
page: int
page_size: int
"""
if filter:
request.set_header('X-Nuage-Filter', filter)
if order_by:
request.set_header('X-Nuage-OrderBy', order_by)
if page is not None:
request.set_header('X-Nuage-Page', str(page))
if page_size:
request.set_header('X-Nuage-PageSize', str(page_size))
if len(group_by) > 0:
header = ", ".join(group_by)
request.set_header('X-Nuage-GroupBy', 'true')
request.set_header('X-Nuage-Attributes', header)
|
Fetch objects according to given filter and page.
Note:
This method fetches all managed class objects and stores them
in the local_name of the served object, which means that the parent
object will hold them in a list. You can prevent this behavior
by setting commit to False. In that case, the fetched children
won't be added in the parent object cache.
Args:
filter (string): string that represents a predicate filter
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
commit (bool): boolean to update current object
callback (function): Callback that should be called in case of an async request
Returns:
tuple: Returns a tuple of information (fetcher, served object, fetched objects, connection)
Example:
>>> entity.children.fetch()
(<NUChildrenFetcher at aaaa>, <NUEntity at bbbb>, [<NUChildren at ccc>, <NUChildren at ddd>], <NURESTConnection at zzz>)
def fetch(self, filter=None, order_by=None, group_by=[], page=None, page_size=None, query_parameters=None, commit=True, async=False, callback=None):
""" Fetch objects according to given filter and page.
Note:
This method fetches all managed class objects and stores them
in the local_name of the served object, which means that the parent
object will hold them in a list. You can prevent this behavior
by setting commit to False. In that case, the fetched children
won't be added in the parent object cache.
Args:
filter (string): string that represents a predicate filter
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
commit (bool): boolean to update current object
callback (function): Callback that should be called in case of an async request
Returns:
tuple: Returns a tuple of information (fetcher, served object, fetched objects, connection)
Example:
>>> entity.children.fetch()
(<NUChildrenFetcher at aaaa>, <NUEntity at bbbb>, [<NUChildren at ccc>, <NUChildren at ddd>], <NURESTConnection at zzz>)
"""
request = NURESTRequest(method=HTTP_METHOD_GET, url=self._prepare_url(), params=query_parameters)
self._prepare_headers(request=request, filter=filter, order_by=order_by, group_by=group_by, page=page, page_size=page_size)
if async:
return self.parent_object.send_request(request=request, async=async, local_callback=self._did_fetch, remote_callback=callback, user_info={'commit': commit})
connection = self.parent_object.send_request(request=request, user_info={'commit': commit})
return self._did_fetch(connection=connection)
|
Fetching objects has been done
def _did_fetch(self, connection):
""" Fetching objects has been done """
self.current_connection = connection
response = connection.response
should_commit = 'commit' not in connection.user_info or connection.user_info['commit']
if connection.response.status_code >= 400 and BambouConfig._should_raise_bambou_http_error:
raise BambouHTTPError(connection=connection)
if response.status_code != 200:
if should_commit:
self.current_total_count = 0
self.current_page = 0
self.current_ordered_by = ''
return self._send_content(content=None, connection=connection)
results = response.data
fetched_objects = list()
current_ids = list()
if should_commit:
if 'X-Nuage-Count' in response.headers and response.headers['X-Nuage-Count']:
self.current_total_count = int(response.headers['X-Nuage-Count'])
if 'X-Nuage-Page' in response.headers and response.headers['X-Nuage-Page']:
self.current_page = int(response.headers['X-Nuage-Page'])
if 'X-Nuage-OrderBy' in response.headers and response.headers['X-Nuage-OrderBy']:
self.current_ordered_by = response.headers['X-Nuage-OrderBy']
if results:
for result in results:
nurest_object = self.new()
nurest_object.from_dict(result)
nurest_object.parent = self.parent_object
fetched_objects.append(nurest_object)
if not should_commit:
continue
current_ids.append(nurest_object.id)
if nurest_object in self:
idx = self.index(nurest_object)
current_object = self[idx]
current_object.from_dict(nurest_object.to_dict())
else:
self.append(nurest_object)
if should_commit:
for obj in self:
if obj.id not in current_ids:
self.remove(obj)
return self._send_content(content=fetched_objects, connection=connection)
|
Fetch object and directly return them
Note:
`get` won't put the fetched objects in the parent's children list.
You cannot override this behavior. If you want to commit them in the parent
you can use :method:vsdk.NURESTFetcher.fetch or manually add the list with
:method:vsdk.NURESTObject.add_child
Args:
filter (string): string that represents a predicate filter
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
commit (bool): boolean to update current object
callback (function): Callback that should be called in case of an async request
Returns:
list: list of vsdk.NURESTObject if any
Example:
>>> print entity.children.get()
[<NUChildren at xxx>, <NUChildren at yyyy>, <NUChildren at zzz>]
def get(self, filter=None, order_by=None, group_by=[], page=None, page_size=None, query_parameters=None, commit=True, async=False, callback=None):
""" Fetch object and directly return them
Note:
`get` won't put the fetched objects in the parent's children list.
You cannot override this behavior. If you want to commit them in the parent
you can use :method:vsdk.NURESTFetcher.fetch or manually add the list with
:method:vsdk.NURESTObject.add_child
Args:
filter (string): string that represents a predicate filter
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
commit (bool): boolean to update current object
callback (function): Callback that should be called in case of an async request
Returns:
list: list of vsdk.NURESTObject if any
Example:
>>> print entity.children.get()
[<NUChildren at xxx>, <NUChildren at yyyy>, <NUChildren at zzz>]
"""
return self.fetch(filter=filter, order_by=order_by, group_by=group_by, page=page, page_size=page_size, query_parameters=query_parameters, commit=commit)[2]
|
Fetch object and directly return the first one
Note:
`get_first` won't put the fetched object in the parent's children list.
You cannot override this behavior. If you want to commit it in the parent
you can use :method:vsdk.NURESTFetcher.fetch or manually add it with
:method:vsdk.NURESTObject.add_child
Args:
filter (string): string that represents a predicate filter
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
commit (bool): boolean to update current object
callback (function): Callback that should be called in case of an async request
Returns:
vsdk.NURESTObject: the first object if any, or None
Example:
>>> print entity.children.get_first(filter="name == 'My Entity'")
<NUChildren at xxx>
def get_first(self, filter=None, order_by=None, group_by=[], query_parameters=None, commit=False, async=False, callback=None):
""" Fetch object and directly return the first one
Note:
`get_first` won't put the fetched object in the parent's children list.
You cannot override this behavior. If you want to commit it in the parent
you can use :method:vsdk.NURESTFetcher.fetch or manually add it with
:method:vsdk.NURESTObject.add_child
Args:
filter (string): string that represents a predicate filter
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
commit (bool): boolean to update current object
callback (function): Callback that should be called in case of an async request
Returns:
vsdk.NURESTObject: the first object if any, or None
Example:
>>> print entity.children.get_first(filter="name == 'My Entity'")
<NUChildren at xxx>
"""
objects = self.get(filter=filter, order_by=order_by, group_by=group_by, page=0, page_size=1, query_parameters=query_parameters, commit=commit)
return objects[0] if len(objects) else None
|
Get the total count of objects that can be fetched according to filter
This method can be asynchronous and trigger the callback method
when result is ready.
Args:
filter (string): string that represents a predicate filter (eg. name == 'x')
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
callback (function): Method that will be triggered asynchronously
Returns:
Returns a transaction ID when asynchronous call is made.
Otherwise it will return a tuple of information containing
(fetcher, served object, count of fetched objects)
def count(self, filter=None, order_by=None, group_by=[], page=None, page_size=None, query_parameters=None, async=False, callback=None):
""" Get the total count of objects that can be fetched according to filter
This method can be asynchronous and trigger the callback method
when result is ready.
Args:
filter (string): string that represents a predicate filter (eg. name == 'x')
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
callback (function): Method that will be triggered asynchronously
Returns:
Returns a transaction ID when asynchronous call is made.
Otherwise it will return a tuple of information containing
(fetcher, served object, count of fetched objects)
"""
request = NURESTRequest(method=HTTP_METHOD_HEAD, url=self._prepare_url(), params=query_parameters)
self._prepare_headers(request=request, filter=filter, order_by=order_by, group_by=group_by, page=page, page_size=page_size)
if async:
return self.parent_object.send_request(request=request, async=async, local_callback=self._did_count, remote_callback=callback)
else:
connection = self.parent_object.send_request(request=request)
return self._did_count(connection)
|
Get the total count of objects that can be fetched according to filter
Args:
filter (string): string that represents a predicate filter (eg. name == 'x')
order_by (string): string that represents an order by clause
group_by (string): list of names for grouping
page (int): number of the page to load
page_size (int): number of results per page
Returns:
Returns the number of objects found
def get_count(self, filter=None, order_by=None, group_by=[], page=None, page_size=None, query_parameters=None):
""" Get the total count of objects that can be fetched according to filter
Args:
filter (string): string that represents a predicate filter (e.g. name == 'x')
order_by (string): string that represents an order by clause
group_by (list): list of attribute names for grouping
page (int): number of the page to load
page_size (int): number of results per page
Returns:
Returns the number of objects found
"""
return self.count(filter=filter, order_by=order_by, group_by=group_by, page=page, page_size=page_size, query_parameters=query_parameters, async=False)[2]
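A minimal usage sketch (the `entity.children` fetcher below is an assumed example object, mirroring the `get_first` example above); the synchronous call returns the third element of the `(fetcher, served object, count)` tuple:
# Hypothetical usage; `entity` and its `children` fetcher are assumed to exist.
total = entity.children.get_count(filter="name == 'My Entity'")
print("matching children: %d" % total)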
|
Called when the count request has finished
def _did_count(self, connection):
""" Called when the count request has finished """
self.current_connection = connection
response = connection.response
count = 0
callback = None
if 'X-Nuage-Count' in response.headers:
count = int(response.headers['X-Nuage-Count'])
if 'remote' in connection.callbacks:
callback = connection.callbacks['remote']
if connection.async:
if callback:
callback(self, self.parent_object, count)
self.current_connection.reset()
self.current_connection = None
else:
if connection.response.status_code >= 400 and BambouConfig._should_raise_bambou_http_error:
raise BambouHTTPError(connection=connection)
return (self, self.parent_object, count)
|
Deliver the fetched content for the given connection
def _send_content(self, content, connection):
""" Deliver the fetched content for the given connection """
if connection:
if connection.async:
callback = connection.callbacks['remote']
if callback:
callback(self, self.parent_object, content)
self.current_connection.reset()
self.current_connection = None
else:
return (self, self.parent_object, content)
|
Record how many consumables were used and update the current configuration.
Return True if the configuration changed.
def update_configuration(self, new_configuration):
""" Record how many consumables were used and update the current configuration.
Return True if the configuration changed.
"""
if new_configuration == self.configuration:
return False
now = timezone.now()
if now.month != self.price_estimate.month:
raise ConsumptionDetailUpdateError('It is possible to update consumption details only for current month.')
minutes_from_last_update = self._get_minutes_from_last_update(now)
for consumable_item, usage in self.configuration.items():
# accumulate what the old configuration consumed since the last update
consumed_since_last_update = usage * minutes_from_last_update
self.consumed_before_update[consumable_item] = (
self.consumed_before_update.get(consumable_item, 0) + consumed_since_last_update)
self.configuration = new_configuration
self.last_update_time = now
self.save()
return True
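A worked example of the accumulation step above, with purely illustrative numbers: given an old configuration of {'cores': 2} units per minute, 90 minutes since the last update, and 60 core-minutes already recorded, the method folds 2 * 90 = 180 more core-minutes into consumed_before_update before the new configuration takes effect.
# Standalone sketch of the accumulation step; values are illustrative only.
consumed_before_update = {'cores': 60}   # carried over from earlier updates
configuration = {'cores': 2}             # old configuration, units consumed per minute
minutes_from_last_update = 90
for item, usage in configuration.items():
    consumed_before_update[item] = consumed_before_update.get(item, 0) + usage * minutes_from_last_update
assert consumed_before_update == {'cores': 240}  # 60 + 2 * 90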
|
How many consumables were (or will be) consumed by the end of the month
def consumed_in_month(self):
""" How many consumables were (or will be) consumed by the end of the month """
month_end = core_utils.month_end(datetime.date(self.price_estimate.year, self.price_estimate.month, 1))
return self._get_consumed(month_end)
|
How many consumables were (or will be) used by the resource until the given time.
def _get_consumed(self, time):
""" How many consumables were (or will be) used by the resource until the given time. """
minutes_from_last_update = self._get_minutes_from_last_update(time)
if minutes_from_last_update < 0:
raise ConsumptionDetailCalculateError('Cannot calculate consumption if time < last modification date.')
_consumed = {}
for consumable_item in set(list(self.configuration.keys()) + list(self.consumed_before_update.keys())):
after_update = self.configuration.get(consumable_item, 0) * minutes_from_last_update
before_update = self.consumed_before_update.get(consumable_item, 0)
_consumed[consumable_item] = after_update + before_update
return _consumed
|
How many minutes have passed from the last update to the given time
def _get_minutes_from_last_update(self, time):
""" How many minutes have passed from the last update to the given time """
time_from_last_update = time - self.last_update_time
return int(time_from_last_update.total_seconds() / 60)
|
Get a list of all price list items that should be used for the resource.
If a price list item is defined for the service, return it; otherwise
return the default price list item.
def get_for_resource(resource):
""" Get a list of all price list items that should be used for the resource.
If a price list item is defined for the service, return it; otherwise
return the default price list item.
"""
resource_content_type = ContentType.objects.get_for_model(resource)
default_items = set(DefaultPriceListItem.objects.filter(resource_content_type=resource_content_type))
service = resource.service_project_link.service
items = set(PriceListItem.objects.filter(
default_price_list_item__in=default_items, service=service).select_related('default_price_list_item'))
overridden_defaults = {i.default_price_list_item for i in items}
return items | (default_items - overridden_defaults)
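A toy illustration of the set arithmetic in the return statement (plain strings stand in for the price list item objects):
# Toy sketch; strings stand in for PriceListItem / DefaultPriceListItem instances.
default_items = {'cpu-default', 'ram-default'}
items = {'cpu-service'}                  # service-level item overriding 'cpu-default'
overridden_defaults = {'cpu-default'}    # defaults shadowed by service items
assert items | (default_items - overridden_defaults) == {'cpu-service', 'ram-default'}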
|
Get a list of available extensions
def get_extensions(cls):
""" Get a list of available extensions """
assemblies = []
for waldur_extension in pkg_resources.iter_entry_points('waldur_extensions'):
extension_module = waldur_extension.load()
if inspect.isclass(extension_module) and issubclass(extension_module, cls):
if not extension_module.is_assembly():
yield extension_module
else:
assemblies.append(extension_module)
for assembly in assemblies:
yield assembly
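For context, extensions are discovered through the 'waldur_extensions' entry point group; a hypothetical setup.py registration (package and class names below are made up for illustration) might look like:
# Hypothetical setup.py fragment; 'my_extension' and MyExtension are assumed names.
from setuptools import setup

setup(
    name='my-waldur-plugin',
    entry_points={
        'waldur_extensions': [
            'my_extension = my_extension.extension:MyExtension',
        ],
    },
)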
|
Truncates a string to be at most max_length long; max_length == 0 disables truncation.
def ellipsis(source, max_length):
"""Truncates a string to be at most max_length long; max_length == 0 disables truncation."""
if max_length == 0 or len(source) <= max_length:
return source
return source[: max(0, max_length - 3)] + "..."
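A behavior sketch; note that the three trailing dots count toward the limit:
>>> ellipsis("hello world", 8)
'hello...'
>>> ellipsis("hello", 8)
'hello'
>>> ellipsis("hello world", 0)
'hello world'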
|
Wrapper for str() that catches exceptions.
def safe_str(source, max_length=0):
"""Wrapper for str() that catches exceptions."""
try:
return ellipsis(str(source), max_length)
except Exception as e:
return ellipsis("<n/a: str(...) raised %s>" % e, max_length)
|
Wrapper for repr() that catches exceptions.
def safe_repr(source, max_length=0):
"""Wrapper for repr() that catches exceptions."""
try:
return ellipsis(repr(source), max_length)
except Exception as e:
return ellipsis("<n/a: repr(...) raised %s>" % e, max_length)
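A sketch of why these wrappers exist: objects whose str()/repr() raise still yield a printable placeholder instead of propagating the error.
>>> class Broken(object):
...     def __repr__(self):
...         raise RuntimeError("boom")
>>> safe_repr(Broken())
'<n/a: repr(...) raised boom>'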
|
Returns an object with the key-value pairs in source as attributes.
def dict_to_object(source):
"""Returns an object with the key-value pairs in source as attributes."""
target = inspectable_class.InspectableClass()
for k, v in source.items():
setattr(target, k, v)
return target
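Usage sketch (assuming InspectableClass is a plain attribute container, as its use here implies):
>>> obj = dict_to_object({"host": "localhost", "port": 8080})
>>> obj.port
8080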
|
Shallow copies all public attributes from source_obj to dest_obj.
Overwrites them if they already exist.
def copy_public_attrs(source_obj, dest_obj):
"""Shallow copies all public attributes from source_obj to dest_obj.
Overwrites them if they already exist.
"""
for name, value in inspect.getmembers(source_obj):
if not any(name.startswith(x) for x in ["_", "func", "im"]):
setattr(dest_obj, name, value)
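Only names that do not start with '_', 'func', or 'im' survive the filter, so dunder attributes and Python 2 bound-method internals (im_self, func_name, ...) are skipped:
>>> class Source(object):
...     x = 1
...     _hidden = 2
>>> class Dest(object):
...     pass
>>> dest = Dest()
>>> copy_public_attrs(Source(), dest)
>>> dest.x
1
>>> hasattr(dest, "_hidden")
False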
|
Creates a Python class or function from its fully qualified name.
:param name: A fully qualified name of a class or a function. In Python 3 this
is only allowed to be of text type (unicode). In Python 2, both bytes and unicode
are allowed.
:return: A function or class object.
This method is used by serialization code to create a function or class
from a fully qualified name.
def object_from_string(name):
"""Creates a Python class or function from its fully qualified name.
:param name: A fully qualified name of a class or a function. In Python 3 this
is only allowed to be of text type (unicode). In Python 2, both bytes and unicode
are allowed.
:return: A function or class object.
This method is used by serialization code to create a function or class
from a fully qualified name.
"""
if six.PY3:
if not isinstance(name, str):
raise TypeError("name must be str, not %r" % type(name))
else:
if isinstance(name, unicode):
name = name.encode("ascii")
if not isinstance(name, (str, unicode)):
raise TypeError("name must be bytes or unicode, got %r" % type(name))
pos = name.rfind(".")
if pos < 0:
raise ValueError("Invalid function or class name %s" % name)
module_name = name[:pos]
func_name = name[pos + 1 :]
try:
mod = __import__(module_name, fromlist=[func_name], level=0)
except ImportError:
# Hail mary: if the from-import doesn't work, then just import the top level module
# and do getattr on it, one level at a time. This will handle cases where imports are
# done like `from . import submodule as another_name`
parts = name.split(".")
mod = __import__(parts[0], level=0)
for i in range(1, len(parts)):
mod = getattr(mod, parts[i])
return mod
else:
return getattr(mod, func_name)
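A quick round trip with a standard-library name:
>>> import os.path
>>> object_from_string("os.path.join") is os.path.join
True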
|
Returns True if `exceptions` can be caught in an except clause.
A value can be caught if it is an exception type (a subclass of
BaseException) or a non-empty tuple of exception types.
def catchable_exceptions(exceptions):
"""Returns True if `exceptions` can be caught in an except clause.
A value can be caught if it is an exception type (a subclass of
BaseException) or a non-empty tuple of exception types.
"""
if isinstance(exceptions, type) and issubclass(exceptions, BaseException):
return True
if (
isinstance(exceptions, tuple)
and exceptions
and all(issubclass(it, BaseException) for it in exceptions)
):
return True
return False
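A few illustrative cases (the empty tuple is rejected by the `and exceptions` truthiness check):
>>> catchable_exceptions(ValueError)
True
>>> catchable_exceptions((ValueError, KeyError))
True
>>> catchable_exceptions(())
False
>>> catchable_exceptions("ValueError")
False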
|
Temporarily overrides the old value with the new one.
def override(self, value):
"""Temporarily overrides the old value with the new one."""
if self._value is not value:
return _ScopedValueOverrideContext(self, value)
else:
return empty_context
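The surrounding class and _ScopedValueOverrideContext are not shown in this row; a minimal self-contained sketch of the same pattern (an illustration, not the library's actual implementation) could look like:
import contextlib

class ScopedValue(object):
    """Stand-in for the surrounding class; illustrates the override pattern only."""
    def __init__(self, value):
        self._value = value

    def override(self, value):
        # Mirror the fast path above: return a no-op context when nothing changes.
        if self._value is not value:
            return self._override_context(value)
        return contextlib.nullcontext()  # Python 3.7+

    @contextlib.contextmanager
    def _override_context(self, value):
        old, self._value = self._value, value
        try:
            yield
        finally:
            self._value = old

flag = ScopedValue(False)
with flag.override(True):
    assert flag._value is True
assert flag._value is False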
|
Execute validation for actions that are related to a particular object
def validate_object_action(self, action_name, obj=None):
""" Execute validation for actions that are related to a particular object """
action_method = getattr(self, action_name)
if not getattr(action_method, 'detail', False) and action_name not in ('update', 'partial_update', 'destroy'):
# DRF does not add flag 'detail' to update and delete actions, however they execute operation with
# particular object. We need to enable validation for them too.
return
validators = getattr(self, action_name + '_validators', [])
for validator in validators:
validator(obj or self.get_object())
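A hypothetical viewset hook-up; the validator list is looked up as `<action name> + '_validators'` (all names below are assumptions for illustration, not part of the original code):
# Hypothetical sketch; assumes Django REST Framework is available.
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError

def instance_is_stopped(instance):
    # Reject the action unless the resource is in a safe state.
    if instance.state != 'STOPPED':
        raise ValidationError('Instance must be stopped first.')

class InstanceViewSet(viewsets.ModelViewSet):
    destroy_validators = [instance_is_stopped]  # consulted by validate_object_action('destroy')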
|
returns dictionary version of row using keys from self.field_map
def row_dict(self, row):
"""returns dictionary version of row using keys from self.field_map"""
d = {}
for field_name in self.field_map:
d[field_name] = self.field_value(row, field_name)
return d
|
returns detected dialect of filepath and sets self.has_header
if not passed in __init__ kwargs
Arguments:
filepath (str): filepath of target csv file
def _dialect(self, filepath):
"""returns detected dialect of filepath and sets self.has_header
if not passed in __init__ kwargs
Arguments:
filepath (str): filepath of target csv file
"""
with open(filepath, self.read_mode) as csvfile:
sample = csvfile.read(1024)
dialect = csv.Sniffer().sniff(sample)
if self.has_header is None:
# detect header if header not specified
self.has_header = csv.Sniffer().has_header(sample)
csvfile.seek(0)
return dialect
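A standalone sketch of the same sniffing approach, using only the standard library:
import csv

def sniff(filepath):
    # Detect dialect and header presence from the first 1024 characters.
    with open(filepath, newline='') as csvfile:
        sample = csvfile.read(1024)
    return csv.Sniffer().sniff(sample), csv.Sniffer().has_header(sample)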
|
Get a queryset of content types for the given service models
def _get_content_type_queryset(models_list):
""" Get a queryset of content types for the given service models """
content_type_ids = {c.id for c in ContentType.objects.get_for_models(*models_list).values()}
return ContentType.objects.filter(id__in=content_type_ids)
|
Create default price list items for each registered resource.
def init_registered(self, request):
""" Create default price list items for each registered resource. """
created_items = models.DefaultPriceListItem.init_from_registered_resources()
if created_items:
message = ungettext(
_('Price item was created: %s.') % created_items[0].name,
_('Price items were created: %s.') % ', '.join(item.name for item in created_items),
len(created_items)
)
self.message_user(request, message)
else:
self.message_user(request, _('Price items for all registered resources already exist.'))
return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
|
Re-initialize the configuration for each resource whose configuration has changed.
This method should be called if a resource's consumption strategy was changed.
def reinit_configurations(self, request):
""" Re-initialize the configuration for each resource whose configuration has changed.
This method should be called if a resource's consumption strategy was changed.
"""
now = timezone.now()
# Step 1. Collect all resources with changed configuration.
changed_resources = []
for resource_model in CostTrackingRegister.registered_resources:
for resource in resource_model.objects.all():
try:
pe = models.PriceEstimate.objects.get(scope=resource, month=now.month, year=now.year)
except models.PriceEstimate.DoesNotExist:
changed_resources.append(resource)
else:
new_configuration = CostTrackingRegister.get_configuration(resource)
if new_configuration != pe.consumption_details.configuration:
changed_resources.append(resource)
# Step 2. Re-init configuration and recalculate estimate for changed resources.
for resource in changed_resources:
models.PriceEstimate.update_resource_estimate(resource, CostTrackingRegister.get_configuration(resource))
message = _('Configuration was reinitialized for %(count)s resources') % {'count': len(changed_resources)}
self.message_user(request, message)
return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
|
Return the SHA-1 hex digest of the given message. Despite the method name,
this is a one-way hash, not reversible encryption.
def encrypt(self, message):
""" Return the SHA-1 hex digest of the given message. Despite the method name,
this is a one-way hash, not reversible encryption.
"""
if not isinstance(message, (bytes, str)):
raise TypeError
if isinstance(message, str):
# only text needs encoding; bytes input previously crashed on .encode
message = message.encode('utf-8')
return hashlib.sha1(message).hexdigest()
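A quick check of the digest behavior: equal inputs always hash to the same 40-character hex digest, which is what comparison-based callers rely on.
>>> h = hashlib.sha1(b'secret').hexdigest()
>>> len(h)
40
>>> h == hashlib.sha1(b'secret').hexdigest()
True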
|
Updates the next_trigger_at field if:
- the instance becomes active
- instance.schedule has changed
- the instance is new
def save(self, *args, **kwargs):
"""
Updates the next_trigger_at field if:
- the instance becomes active
- instance.schedule has changed
- the instance is new
"""
try:
prev_instance = self.__class__.objects.get(pk=self.pk)
except self.DoesNotExist:
prev_instance = None
if (prev_instance is None or (not prev_instance.is_active and self.is_active)
or self.schedule != prev_instance.schedule):
self.update_next_trigger_at()
super(ScheduleMixin, self).save(*args, **kwargs)
|