Compute the partition at each `level` from the dataframe.
def levels_for(self, time_op, groups, df):
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean() if time_op == 'agg_mean'
else agg_df.sum(numeric_only=True))
return levels
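
A minimal standalone sketch of what `levels_for` produces, assuming a toy sales frame (the column names here are made up for illustration):

import pandas as pd

def levels_for(time_op, groups, df):
    # standalone copy of the method above, minus `self`
    levels = {}
    for i in range(0, len(groups) + 1):
        agg_df = df.groupby(groups[:i]) if i else df
        levels[i] = (
            agg_df.mean() if time_op == 'agg_mean'
            else agg_df.sum(numeric_only=True))
    return levels

df = pd.DataFrame({
    'region': ['east', 'east', 'west'],
    'product': ['a', 'b', 'a'],
    'sales': [10, 20, 30],
})
levels = levels_for('agg_sum', ['region', 'product'], df)
# levels[0]: grand totals, levels[1]: totals per region,
# levels[2]: totals per (region, product)
print(levels[1]['sales'])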
|
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
def nest_values(self, levels, level=0, metric=None, dims=()):
"""
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
"""
if not level:
return [{
'name': m,
'val': levels[0][m],
'children': self.nest_values(levels, 1, m),
} for m in levels[0].index]
if level == 1:
return [{
'name': i,
'val': levels[1][metric][i],
'children': self.nest_values(levels, 2, metric, (i,)),
} for i in levels[1][metric].index]
if level >= len(levels):
return []
return [{
'name': i,
'val': levels[level][metric][dims][i],
'children': self.nest_values(
levels, level + 1, metric, dims + (i,),
),
} for i in levels[level][metric][dims].index]
|
Data representation of the datasource sent to the frontend
def short_data(self):
"""Data representation of the datasource sent to the frontend"""
return {
'edit_url': self.url,
'id': self.id,
'uid': self.uid,
'schema': self.schema,
'name': self.name,
'type': self.type,
'connection': self.connection,
'creator': str(self.created_by),
}
|
Data representation of the datasource sent to the frontend
def data(self):
"""Data representation of the datasource sent to the frontend"""
order_by_choices = []
# self.column_names returns sorted column_names
for s in self.column_names:
s = str(s or '')
order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))
verbose_map = {'__timestamp': 'Time'}
verbose_map.update({
o.metric_name: o.verbose_name or o.metric_name
for o in self.metrics
})
verbose_map.update({
o.column_name: o.verbose_name or o.column_name
for o in self.columns
})
return {
# simple fields
'id': self.id,
'column_formats': self.column_formats,
'description': self.description,
'database': self.database.data, # pylint: disable=no-member
'default_endpoint': self.default_endpoint,
'filter_select': self.filter_select_enabled, # TODO deprecate
'filter_select_enabled': self.filter_select_enabled,
'name': self.name,
'datasource_name': self.datasource_name,
'type': self.type,
'schema': self.schema,
'offset': self.offset,
'cache_timeout': self.cache_timeout,
'params': self.params,
'perm': self.perm,
'edit_url': self.url,
# sqla-specific
'sql': self.sql,
# one to many
'columns': [o.data for o in self.columns],
'metrics': [o.data for o in self.metrics],
# TODO deprecate, move logic to JS
'order_by_choices': order_by_choices,
'owners': [owner.id for owner in self.owners],
'verbose_map': verbose_map,
'select_star': self.select_star,
}
|
Update ORM one-to-many list from object list
Used for syncing metrics and columns using the same code
def get_fk_many_from_list(
self, object_list, fkmany, fkmany_class, key_attr):
"""Update ORM one-to-many list from object list
Used for syncing metrics and columns using the same code"""
object_dict = {o.get(key_attr): o for o in object_list}
object_keys = [o.get(key_attr) for o in object_list]
# delete fks that have been removed
fkmany = [o for o in fkmany if getattr(o, key_attr) in object_keys]
# sync existing fks
for fk in fkmany:
obj = object_dict.get(getattr(fk, key_attr))
for attr in fkmany_class.update_from_object_fields:
setattr(fk, attr, obj.get(attr))
# create new fks
new_fks = []
orm_keys = [getattr(o, key_attr) for o in fkmany]
for obj in object_list:
key = obj.get(key_attr)
if key not in orm_keys:
del obj['id']
orm_kwargs = {}
for k in obj:
if (
k in fkmany_class.update_from_object_fields and
k in obj
):
orm_kwargs[k] = obj[k]
new_obj = fkmany_class(**orm_kwargs)
new_fks.append(new_obj)
fkmany += new_fks
return fkmany
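
A toy, ORM-free sketch of the same sync pattern, with a made-up `Metric` class standing in for `fkmany_class`:

class Metric:
    # made-up stand-in for an ORM class exposing update_from_object_fields
    update_from_object_fields = ['metric_name', 'expression']

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

existing = [Metric(metric_name='count', expression='COUNT(*)')]
incoming = [
    {'id': 1, 'metric_name': 'count', 'expression': 'COUNT(1)'},
    {'id': None, 'metric_name': 'sum_x', 'expression': 'SUM(x)'},
]
object_dict = {o['metric_name']: o for o in incoming}

# keep only metrics still present in the incoming list, and update them in place
synced = [m for m in existing if m.metric_name in object_dict]
for m in synced:
    for attr in Metric.update_from_object_fields:
        setattr(m, attr, object_dict[m.metric_name].get(attr))

# create the ones that do not exist yet
known = {m.metric_name for m in synced}
for obj in incoming:
    if obj['metric_name'] not in known:
        synced.append(Metric(**{k: v for k, v in obj.items()
                                 if k in Metric.update_from_object_fields}))
print([(m.metric_name, m.expression) for m in synced])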
|
Update datasource from a data structure
The UI's table editor crafts a complex data structure that
contains most of the datasource's properties as well as
an array of metrics and columns objects. This method
receives the object from the UI and syncs the datasource to
match it. Since the fields are different for the different
connectors, the implementation uses ``update_from_object_fields``
which can be defined for each connector and
defines which fields should be synced
def update_from_object(self, obj):
"""Update datasource from a data structure
The UI's table editor crafts a complex data structure that
contains most of the datasource's properties as well as
an array of metrics and columns objects. This method
receives the object from the UI and syncs the datasource to
match it. Since the fields are different for the different
connectors, the implementation uses ``update_from_object_fields``
which can be defined for each connector and
defines which fields should be synced"""
for attr in self.update_from_object_fields:
setattr(self, attr, obj.get(attr))
self.owners = obj.get('owners', [])
# Syncing metrics
metrics = self.get_fk_many_from_list(
obj.get('metrics'), self.metrics, self.metric_class, 'metric_name')
self.metrics = metrics
# Syncing columns
self.columns = self.get_fk_many_from_list(
obj.get('columns'), self.columns, self.column_class, 'column_name')
|
Returns a pandas dataframe based on the query object
def get_query_result(self, query_object):
"""Returns a pandas dataframe based on the query object"""
# Here, we assume that all the queries will use the same datasource, which
# is a valid assumption for the current setting. In the long term, we may or
# may not support multiple queries from different data sources.
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_object.granularity)
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be a different backend but the interface is common
result = self.datasource.query(query_object.to_dict())
df = result.df
# Transform the timestamp we received from the database to a pandas-supported
# datetime format. If no python_date_format is specified, the pattern is
# assumed to be the default ISO date format.
# If the datetime format is unix epoch, the corresponding parsing logic
# is used.
if df is not None and not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ('epoch_s', 'epoch_ms'):
# Column has already been formatted as a timestamp.
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += query_object.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df, query_object)
df = df.replace([np.inf, -np.inf], np.nan)
return {
'query': result.query,
'status': result.status,
'error_message': result.error_message,
'df': df,
}
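
A small sketch of the timestamp handling above, assuming a toy result frame and an explicit python_date_format (column values and format are illustrative):

from datetime import timedelta

import pandas as pd

df = pd.DataFrame({'__timestamp': ['2019-01-01 00:00:00', '2019-01-02 00:00:00']})

# parse with the column's python_date_format, as get_query_result does
df['__timestamp'] = pd.to_datetime(
    df['__timestamp'], utc=False, format='%Y-%m-%d %H:%M:%S')

# apply an hour offset, mimicking datasource.offset / query_object.time_shift
df['__timestamp'] += timedelta(hours=8)
print(df['__timestamp'])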
|
Converting metrics to numeric when pandas.read_sql cannot
def df_metrics_to_num(self, df, query_object):
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = [metric for metric in query_object.metrics]
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors='coerce')
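
For instance, an object-typed metric column read back by pandas.read_sql can be coerced like this (toy data):

import numpy as np
import pandas as pd

df = pd.DataFrame({'metric': ['1', '2.5', 'n/a'], 'dim': ['a', 'b', 'c']})
if df['metric'].dtype.type == np.object_:
    # unparseable values become NaN instead of raising
    df['metric'] = pd.to_numeric(df['metric'], errors='coerce')
print(df['metric'])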
|
Returns a payload of metadata and data
def get_single_payload(self, query_obj):
"""Returns a payload of metadata and data"""
payload = self.get_df_payload(query_obj)
df = payload.get('df')
status = payload.get('status')
if status != utils.QueryStatus.FAILED:
if df is not None and df.empty:
payload['error'] = 'No data'
else:
payload['data'] = self.get_data(df)
if 'df' in payload:
del payload['df']
return payload
|
Handles caching around the df payload retrieval
def get_df_payload(self, query_obj, **kwargs):
"""Handles caching around the df paylod retrieval"""
cache_key = query_obj.cache_key(
datasource=self.datasource.uid, **kwargs) if query_obj else None
logging.info('Cache key: {}'.format(cache_key))
is_loaded = False
stacktrace = None
df = None
cached_dttm = datetime.utcnow().isoformat().split('.')[0]
cache_value = None
status = None
query = ''
error_message = None
if cache_key and cache and not self.force:
cache_value = cache.get(cache_key)
if cache_value:
stats_logger.incr('loaded_from_cache')
try:
cache_value = pkl.loads(cache_value)
df = cache_value['df']
query = cache_value['query']
status = utils.QueryStatus.SUCCESS
is_loaded = True
except Exception as e:
logging.exception(e)
logging.error('Error reading cache: ' +
utils.error_msg_from_exception(e))
logging.info('Serving from cache')
if query_obj and not is_loaded:
try:
query_result = self.get_query_result(query_obj)
status = query_result['status']
query = query_result['query']
error_message = query_result['error_message']
df = query_result['df']
if status != utils.QueryStatus.FAILED:
stats_logger.incr('loaded_from_source')
is_loaded = True
except Exception as e:
logging.exception(e)
if not error_message:
error_message = '{}'.format(e)
status = utils.QueryStatus.FAILED
stacktrace = traceback.format_exc()
if (
is_loaded and
cache_key and
cache and
status != utils.QueryStatus.FAILED):
try:
cache_value = dict(
dttm=cached_dttm,
df=df if df is not None else None,
query=query,
)
cache_value = pkl.dumps(
cache_value, protocol=pkl.HIGHEST_PROTOCOL)
logging.info('Caching {} chars at key {}'.format(
len(cache_value), cache_key))
stats_logger.incr('set_cache_key')
cache.set(
cache_key,
cache_value,
timeout=self.cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large, or for whatever other reason
logging.warning('Could not cache key {}'.format(cache_key))
logging.exception(e)
cache.delete(cache_key)
return {
'cache_key': cache_key,
'cached_dttm': cache_value['dttm'] if cache_value is not None else None,
'cache_timeout': self.cache_timeout,
'df': df,
'error': error_message,
'is_cached': cache_key is not None,
'query': query,
'status': status,
'stacktrace': stacktrace,
'rowcount': len(df.index) if df is not None else 0,
}
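
The cache round-trip boils down to pickling a small dict that carries the dataframe and the query; a minimal sketch with an in-memory dict standing in for the real cache backend:

import pickle as pkl

import pandas as pd

cache = {}  # stand-in for the configured cache backend

def cache_set(key, df, query):
    value = pkl.dumps({'df': df, 'query': query}, protocol=pkl.HIGHEST_PROTOCOL)
    cache[key] = value

def cache_get(key):
    raw = cache.get(key)
    return pkl.loads(raw) if raw is not None else None

cache_set('some-cache-key', pd.DataFrame({'x': [1, 2]}), 'SELECT x FROM t')
print(cache_get('some-cache-key')['df'])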
|
Data used to render slice in templates
def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ''
try:
d = self.viz.data
self.token = d.get('token')
except Exception as e:
logging.exception(e)
d['error'] = str(e)
return {
'datasource': self.datasource_name,
'description': self.description,
'description_markeddown': self.description_markeddown,
'edit_url': self.edit_url,
'form_data': self.form_data,
'slice_id': self.id,
'slice_name': self.slice_name,
'slice_url': self.slice_url,
'modified': self.modified(),
'changed_on_humanized': self.changed_on_humanized,
'changed_on': self.changed_on.isoformat(),
}
|
Creates :py:class:viz.BaseViz object from the url_params_multidict.
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
def get_viz(self, force=False):
"""Creates :py:class:viz.BaseViz object from the url_params_multidict.
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
"""
slice_params = json.loads(self.params)
slice_params['slice_id'] = self.id
slice_params['json'] = 'false'
slice_params['slice_name'] = self.slice_name
slice_params['viz_type'] = self.viz_type if self.viz_type else 'table'
return viz_types[slice_params.get('viz_type')](
self.datasource,
form_data=slice_params,
force=force,
)
|
Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
def import_obj(cls, slc_to_import, slc_to_override, import_time=None):
"""Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
"""
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(
remote_id=slc_to_import.id, import_time=import_time)
slc_to_import = slc_to_import.copy()
params = slc_to_import.params_dict
slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
session, slc_to_import.datasource_type, params['datasource_name'],
params['schema'], params['database_name']).id
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
session.add(slc_to_import)
logging.info('Final slice: {}'.format(slc_to_import.to_json()))
session.flush()
return slc_to_import.id
|
Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
def import_obj(cls, dashboard_to_import, import_time=None):
"""Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
"""
def alter_positions(dashboard, old_to_new_slc_id_dict):
""" Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "DASHBOARD_CHART_TYPE",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
"""
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (isinstance(value, dict) and value.get('meta') and
value.get('meta').get('chartId')):
old_slice_id = value.get('meta').get('chartId')
if old_slice_id in old_to_new_slc_id_dict:
value['meta']['chartId'] = (
old_to_new_slc_id_dict[old_slice_id]
)
dashboard.position_json = json.dumps(position_data)
logging.info('Started import of the dashboard: {}'
.format(dashboard_to_import.to_json()))
session = db.session
logging.info('Dashboard has {} slices'
.format(len(dashboard_to_import.slices)))
# copy slices object as Slice.import_obj will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
old_to_new_slc_id_dict = {}
new_filter_immune_slices = []
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {
slc.params_dict['remote_id']: slc
for slc in session.query(Slice).all()
if 'remote_id' in slc.params_dict
}
for slc in slices:
logging.info('Importing slice {} from the dashboard: {}'.format(
slc.to_json(), dashboard_to_import.dashboard_title))
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = Slice.import_obj(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = '{}'.format(new_slc_id)
old_slc_id_str = '{}'.format(slc.id)
if ('filter_immune_slices' in i_params_dict and
old_slc_id_str in i_params_dict['filter_immune_slices']):
new_filter_immune_slices.append(new_slc_id_str)
if ('timed_refresh_immune_slices' in i_params_dict and
old_slc_id_str in
i_params_dict['timed_refresh_immune_slices']):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if ('expanded_slices' in i_params_dict and
old_slc_id_str in i_params_dict['expanded_slices']):
new_expanded_slices[new_slc_id_str] = (
i_params_dict['expanded_slices'][old_slc_id_str])
# override the dashboard
existing_dashboard = None
for dash in session.query(Dashboard).all():
if ('remote_id' in dash.params_dict and
dash.params_dict['remote_id'] ==
dashboard_to_import.id):
existing_dashboard = dash
dashboard_to_import.id = None
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
if new_expanded_slices:
dashboard_to_import.alter_params(
expanded_slices=new_expanded_slices)
if new_filter_immune_slices:
dashboard_to_import.alter_params(
filter_immune_slices=new_filter_immune_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(
timed_refresh_immune_slices=new_timed_refresh_immune_slices)
new_slices = session.query(Slice).filter(
Slice.id.in_(old_to_new_slc_id_dict.values())).all()
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
else:
# session.add(dashboard_to_import) causes sqlalchemy failures
# related to the attached users / slices. Creating new object
# allows to avoid conflicts in the sql alchemy state.
copied_dash = dashboard_to_import.copy()
copied_dash.slices = new_slices
session.add(copied_dash)
session.flush()
return copied_dash.id
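
The chartId remapping performed by alter_positions can be exercised in isolation; a sketch with made-up node keys and slice ids:

import json

position_json = json.dumps({
    'DASHBOARD_VERSION_KEY': 'v2',
    'CHART-abc123': {
        'type': 'CHART',
        'id': 'CHART-abc123',
        'meta': {'width': 4, 'height': 50, 'chartId': 118},
    },
})
old_to_new_slc_id_dict = {118: 42}

position_data = json.loads(position_json)
for value in position_data.values():
    if (isinstance(value, dict) and
            value.get('meta', {}).get('chartId') in old_to_new_slc_id_dict):
        value['meta']['chartId'] = old_to_new_slc_id_dict[value['meta']['chartId']]
print(json.dumps(position_data))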
|
Get the effective user, especially during impersonation.
:param url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username
def get_effective_user(self, url, user_name=None):
"""
Get the effective user, especially during impersonation.
:param url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username
"""
effective_username = None
if self.impersonate_user:
effective_username = url.username
if user_name:
effective_username = user_name
elif (
hasattr(g, 'user') and hasattr(g.user, 'username') and
g.user.username is not None
):
effective_username = g.user.username
return effective_username
|
Generates a ``select *`` statement in the proper dialect
def select_star(
self, table_name, schema=None, limit=100, show_cols=False,
indent=True, latest_partition=False, cols=None):
"""Generates a ``select *`` statement in the proper dialect"""
eng = self.get_sqla_engine(
schema=schema, source=utils.sources.get('sql_lab', None))
return self.db_engine_spec.select_star(
self, table_name, schema=schema, engine=eng,
limit=limit, show_cols=show_cols,
indent=indent, latest_partition=latest_partition, cols=cols)
|
Parameters need to be passed as keyword arguments.
def all_table_names_in_database(self, cache=False,
cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments."""
if not self.allow_multi_schema_metadata_fetch:
return []
return self.db_engine_spec.fetch_result_sets(self, 'table')
|
Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: table list
:rtype: list
def all_table_names_in_schema(self, schema, cache=False,
cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: table list
:rtype: list
"""
tables = []
try:
tables = self.db_engine_spec.get_table_names(
inspector=self.inspector, schema=schema)
except Exception as e:
logging.exception(e)
return tables
|
Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: view list
:rtype: list
def all_view_names_in_schema(self, schema, cache=False,
cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: view list
:rtype: list
"""
views = []
try:
views = self.db_engine_spec.get_view_names(
inspector=self.inspector, schema=schema)
except Exception as e:
logging.exception(e)
return views
|
Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: schema list
:rtype: list
def all_schema_names(self, cache=False, cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: schema list
:rtype: list
"""
return self.db_engine_spec.get_schema_names(self.inspector)
|
Allows looking up a grain by either label or duration,
for backward compatibility
def grains_dict(self):
"""Allowing to lookup grain by either label or duration
For backward compatibility"""
d = {grain.duration: grain for grain in self.grains()}
d.update({grain.label: grain for grain in self.grains()})
return d
|
Decorator to log user actions
def log_this(cls, f):
"""Decorator to log user actions"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
user_id = None
if g.user:
user_id = g.user.get_id()
d = request.form.to_dict() or {}
# request parameters can overwrite post body
request_params = request.args.to_dict()
d.update(request_params)
d.update(kwargs)
slice_id = d.get('slice_id')
dashboard_id = d.get('dashboard_id')
try:
slice_id = int(
slice_id or json.loads(d.get('form_data')).get('slice_id'))
except (ValueError, TypeError):
slice_id = 0
stats_logger.incr(f.__name__)
start_dttm = datetime.now()
value = f(*args, **kwargs)
duration_ms = (datetime.now() - start_dttm).total_seconds() * 1000
# bulk insert
try:
explode_by = d.get('explode')
records = json.loads(d.get(explode_by))
except Exception:
records = [d]
referrer = request.referrer[:1000] if request.referrer else None
logs = []
for record in records:
try:
json_string = json.dumps(record)
except Exception:
json_string = None
log = cls(
action=f.__name__,
json=json_string,
dashboard_id=dashboard_id,
slice_id=slice_id,
duration_ms=duration_ms,
referrer=referrer,
user_id=user_id)
logs.append(log)
sesh = db.session()
sesh.bulk_save_objects(logs)
sesh.commit()
return value
return wrapper
|
A decorator to label an endpoint as an API. Catches uncaught exceptions and
returns the response in JSON format
def api(f):
"""
A decorator to label an endpoint as an API. Catches uncaught exceptions and
returns the response in JSON format
"""
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as e:
logging.exception(e)
return json_error_response(get_error_msg())
return functools.update_wrapper(wraps, f)
|
A decorator to catch superset exceptions. Use it after the @api decorator above
so the superset exception handler is triggered before the handler for generic exceptions.
def handle_api_exception(f):
"""
A decorator to catch superset exceptions. Use it after the @api decorator above
so the superset exception handler is triggered before the handler for generic exceptions.
"""
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except SupersetSecurityException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
status=e.status,
stacktrace=traceback.format_exc(),
link=e.link)
except SupersetException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc(),
status=e.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return functools.update_wrapper(wraps, f)
|
Meant to be used in `pre_update` hooks on models to enforce ownership
Admins have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
def check_ownership(obj, raise_if_false=True):
"""Meant to be used in `pre_update` hooks on models to enforce ownership
Admins have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
"""
if not obj:
return False
security_exception = SupersetSecurityException(
"You don't have the rights to alter [{}]".format(obj))
if g.user.is_anonymous:
if raise_if_false:
raise security_exception
return False
roles = [r.name for r in get_user_roles()]
if 'Admin' in roles:
return True
session = db.create_scoped_session()
orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()
# Making a list of owners that works across ORM models
owners = []
if hasattr(orig_obj, 'owners'):
owners += orig_obj.owners
if hasattr(orig_obj, 'owner'):
owners += [orig_obj.owner]
if hasattr(orig_obj, 'created_by'):
owners += [orig_obj.created_by]
owner_names = [o.username for o in owners if o]
if (
g.user and hasattr(g.user, 'username') and
g.user.username in owner_names):
return True
if raise_if_false:
raise security_exception
else:
return False
|
Customize how fields are bound by stripping all whitespace.
:param form: The form
:param unbound_field: The unbound field
:param options: The field options
:returns: The bound field
def bind_field(
self,
form: DynamicForm,
unbound_field: UnboundField,
options: Dict[Any, Any],
) -> Field:
"""
Customize how fields are bound by stripping all whitespace.
:param form: The form
:param unbound_field: The unbound field
:param options: The field options
:returns: The bound field
"""
filters = unbound_field.kwargs.get('filters', [])
filters.append(lambda x: x.strip() if isinstance(x, str) else x)
return unbound_field.bind(form=form, filters=filters, **options)
|
Common data always sent to the client
def common_bootsrap_payload(self):
"""Common data always sent to the client"""
messages = get_flashed_messages(with_categories=True)
locale = str(get_locale())
return {
'flash_messages': messages,
'conf': {k: conf.get(k) for k in FRONTEND_CONF_KEYS},
'locale': locale,
'language_pack': get_language_pack(locale),
'feature_flags': get_feature_flags(),
}
|
Delete function logic, override to implement different logic
deletes the record with primary_key = pk
:param pk:
record primary key to delete
def _delete(self, pk):
"""
Delete function logic, override to implement different logic
deletes the record with primary_key = pk
:param pk:
record primary key to delete
"""
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
try:
self.pre_delete(item)
except Exception as e:
flash(str(e), 'danger')
else:
view_menu = security_manager.find_view_menu(item.get_perm())
pvs = security_manager.get_session.query(
security_manager.permissionview_model).filter_by(
view_menu=view_menu).all()
schema_view_menu = None
if hasattr(item, 'schema_perm'):
schema_view_menu = security_manager.find_view_menu(item.schema_perm)
pvs.extend(security_manager.get_session.query(
security_manager.permissionview_model).filter_by(
view_menu=schema_view_menu).all())
if self.datamodel.delete(item):
self.post_delete(item)
for pv in pvs:
security_manager.get_session.delete(pv)
if view_menu:
security_manager.get_session.delete(view_menu)
if schema_view_menu:
security_manager.get_session.delete(schema_view_menu)
security_manager.get_session.commit()
flash(*self.datamodel.message)
self.update_redirect()
|
Returns a set of tuples with the perm name and view menu name
def get_all_permissions(self):
"""Returns a set of tuples with the perm name and view menu name"""
perms = set()
for role in self.get_user_roles():
for perm_view in role.permissions:
t = (perm_view.permission.name, perm_view.view_menu.name)
perms.add(t)
return perms
|
Returns the details of view_menus for a perm name
def get_view_menus(self, permission_name):
"""Returns the details of view_menus for a perm name"""
vm = set()
for perm_name, vm_name in self.get_all_permissions():
if perm_name == permission_name:
vm.add(vm_name)
return vm
|
Destroy a driver
def destroy_webdriver(driver):
"""
Destroy a driver
"""
# This is some very flaky code in selenium. Hence the retries
# and catch-all exceptions
try:
retry_call(driver.close, tries=2)
except Exception:
pass
try:
driver.quit()
except Exception:
pass
|
Given a schedule, deliver the dashboard as an email report
def deliver_dashboard(schedule):
"""
Given a schedule, deliver the dashboard as an email report
"""
dashboard = schedule.dashboard
dashboard_url = _get_url_path(
'Superset.dashboard',
dashboard_id=dashboard.id,
)
# Create a driver, fetch the page, wait for the page to render
driver = create_webdriver()
window = config.get('WEBDRIVER_WINDOW')['dashboard']
driver.set_window_size(*window)
driver.get(dashboard_url)
time.sleep(PAGE_RENDER_WAIT)
# Set up a function to retry once for the element.
# This is buggy in certain selenium versions with firefox driver
get_element = getattr(driver, 'find_element_by_class_name')
element = retry_call(
get_element,
fargs=['grid-container'],
tries=2,
delay=PAGE_RENDER_WAIT,
)
try:
screenshot = element.screenshot_as_png
except WebDriverException:
# Some webdrivers do not support screenshots for elements.
# In such cases, take a screenshot of the entire page.
screenshot = driver.screenshot() # pylint: disable=no-member
finally:
destroy_webdriver(driver)
# Generate the email body and attachments
email = _generate_mail_content(
schedule,
screenshot,
dashboard.dashboard_title,
dashboard_url,
)
subject = __(
'%(prefix)s %(title)s',
prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
title=dashboard.dashboard_title,
)
_deliver_email(schedule, subject, email)
|
Given a schedule, deliver the slice as an email report
def deliver_slice(schedule):
"""
Given a schedule, deliver the slice as an email report
"""
if schedule.email_format == SliceEmailReportFormat.data:
email = _get_slice_data(schedule)
elif schedule.email_format == SliceEmailReportFormat.visualization:
email = _get_slice_visualization(schedule)
else:
raise RuntimeError('Unknown email report format')
subject = __(
'%(prefix)s %(title)s',
prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
title=schedule.slice.slice_name,
)
_deliver_email(schedule, subject, email)
|
Find all active schedules and schedule celery tasks for
each of them with a specific ETA (determined by parsing
the cron schedule for the schedule)
def schedule_window(report_type, start_at, stop_at, resolution):
"""
Find all active schedules and schedule celery tasks for
each of them with a specific ETA (determined by parsing
the cron schedule for the schedule)
"""
model_cls = get_scheduler_model(report_type)
dbsession = db.create_scoped_session()
schedules = dbsession.query(model_cls).filter(model_cls.active.is_(True))
for schedule in schedules:
args = (
report_type,
schedule.id,
)
# Schedule the job for the specified time window
for eta in next_schedules(schedule.crontab,
start_at,
stop_at,
resolution=resolution):
schedule_email_report.apply_async(args, eta=eta)
|
Celery beat job meant to be invoked hourly
def schedule_hourly():
""" Celery beat job meant to be invoked hourly """
if not config.get('ENABLE_SCHEDULED_EMAIL_REPORTS'):
logging.info('Scheduled email reports not enabled in config')
return
resolution = config.get('EMAIL_REPORTS_CRON_RESOLUTION', 0) * 60
# Get the top of the hour
start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
stop_at = start_at + timedelta(seconds=3600)
schedule_window(ScheduleType.dashboard.value, start_at, stop_at, resolution)
schedule_window(ScheduleType.slice.value, start_at, stop_at, resolution)
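
A sketch of the hourly window and of expanding a crontab into ETAs inside it. The real code delegates to next_schedules with a resolution argument; this uses croniter directly, and the crontab value is made up:

from datetime import datetime, timedelta

from croniter import croniter
from dateutil.tz import tzlocal

# one-hour window starting at the top of the current hour, as in schedule_hourly
start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
stop_at = start_at + timedelta(seconds=3600)

crontab = '*/15 * * * *'  # made-up schedule: every 15 minutes
itr = croniter(crontab, start_at - timedelta(seconds=1))
eta = itr.get_next(datetime)
while eta < stop_at:
    print('would schedule report at', eta)
    eta = itr.get_next(datetime)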
|
De-duplicates a list of strings by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values. Case sensitive comparison by default.
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
foo,bar,bar__1,bar__2,Bar__3
def dedup(l, suffix='__', case_sensitive=True):
"""De-duplicates a list of string by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values. Case sensitive comparison by default.
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
foo,bar,bar__1,bar__2,Bar__3
"""
new_l = []
seen = {}
for s in l:
s_fixed_case = s if case_sensitive else s.lower()
if s_fixed_case in seen:
seen[s_fixed_case] += 1
s += suffix + str(seen[s_fixed_case])
else:
seen[s_fixed_case] = 0
new_l.append(s)
return new_l
|
Given a numpy dtype, returns a generic database type
def db_type(cls, dtype):
"""Given a numpy dtype, Returns a generic database type"""
if isinstance(dtype, ExtensionDtype):
return cls.type_map.get(dtype.kind)
elif hasattr(dtype, 'char'):
return cls.type_map.get(dtype.char)
|
Provides metadata about columns for data visualization.
:return: dict, with the fields name, type, is_date, is_dim and agg.
def columns(self):
"""Provides metadata about columns for data visualization.
:return: dict, with the fields name, type, is_date, is_dim and agg.
"""
if self.df.empty:
return None
columns = []
sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.df.index))
sample = self.df
if sample_size:
sample = self.df.sample(sample_size)
for col in self.df.dtypes.keys():
db_type_str = (
self._type_dict.get(col) or
self.db_type(self.df.dtypes[col])
)
column = {
'name': col,
'agg': self.agg_func(self.df.dtypes[col], col),
'type': db_type_str,
'is_date': self.is_date(self.df.dtypes[col], db_type_str),
'is_dim': self.is_dimension(self.df.dtypes[col], col),
}
if not db_type_str or db_type_str.upper() == 'OBJECT':
v = sample[col].iloc[0] if not sample[col].empty else None
if isinstance(v, str):
column['type'] = 'STRING'
elif isinstance(v, int):
column['type'] = 'INT'
elif isinstance(v, float):
column['type'] = 'FLOAT'
elif isinstance(v, (datetime, date)):
column['type'] = 'DATETIME'
column['is_date'] = True
column['is_dim'] = False
# check if encoded datetime
if (
column['type'] == 'STRING' and
self.datetime_conversion_rate(sample[col]) >
INFER_COL_TYPES_THRESHOLD):
column.update({
'is_date': True,
'is_dim': False,
'agg': None,
})
# 'agg' is an optional attribute
if not column['agg']:
column.pop('agg', None)
columns.append(column)
return columns
|
Getting the time component of the query
def get_timestamp_expression(self, time_grain):
"""Getting the time component of the query"""
label = utils.DTTM_ALIAS
db = self.table.database
pdf = self.python_date_format
is_epoch = pdf in ('epoch_s', 'epoch_ms')
if not self.expression and not time_grain and not is_epoch:
sqla_col = column(self.column_name, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
grain = None
if time_grain:
grain = db.grains_dict().get(time_grain)
if not grain:
raise NotImplementedError(
f'No grain spec for {time_grain} for database {db.database_name}')
col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
sqla_col = literal_column(expr, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
|
Convert a datetime object to a SQL expression string.
If database_expression is empty, the internal dttm
will be formatted using the pattern that
the user provided (python_date_format).
If database_expression is not empty, the internal dttm
will be rendered through that SQL expression for the database to convert.
def dttm_sql_literal(self, dttm, is_epoch_in_utc):
"""Convert datetime object to a SQL expression string
If database_expression is empty, the internal dttm
will be parsed as the string with the pattern that
the user inputted (python_date_format)
If database_expression is not empty, the internal dttm
will be parsed as the sql sentence for the database to convert
"""
tf = self.python_date_format
if self.database_expression:
return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
elif tf:
if is_epoch_in_utc:
seconds_since_epoch = dttm.timestamp()
else:
seconds_since_epoch = (dttm - datetime(1970, 1, 1)).total_seconds()
seconds_since_epoch = int(seconds_since_epoch)
if tf == 'epoch_s':
return str(seconds_since_epoch)
elif tf == 'epoch_ms':
return str(seconds_since_epoch * 1000)
return "'{}'".format(dttm.strftime(tf))
else:
s = self.table.database.db_engine_spec.convert_dttm(
self.type or '', dttm)
return s or "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S.%f'))
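
For example, the epoch branches reduce to plain integer arithmetic on a naive datetime (values illustrative):

from datetime import datetime

dttm = datetime(2019, 1, 1, 12, 0, 0)
seconds_since_epoch = int((dttm - datetime(1970, 1, 1)).total_seconds())
print(seconds_since_epoch)           # literal for python_date_format == 'epoch_s'
print(seconds_since_epoch * 1000)    # literal for python_date_format == 'epoch_ms'
print("'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')))  # plain string literal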
|
Takes a sql alchemy column object and adds label info if supported by engine.
:param sqla_col: sql alchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
def make_sqla_column_compatible(self, sqla_col, label=None):
"""Takes a sql alchemy column object and adds label info if supported by engine.
:param sqla_col: sql alchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
"""
label_expected = label or sqla_col.name
db_engine_spec = self.database.db_engine_spec
if db_engine_spec.supports_column_aliases:
label = db_engine_spec.make_label_compatible(label_expected)
sqla_col = sqla_col.label(label)
sqla_col._df_label_expected = label_expected
return sqla_col
|
Runs query against sqla to retrieve some
sample values for the given column.
def values_for_column(self, column_name, limit=10000):
"""Runs query against sqla to retrieve some
sample values for the given column.
"""
cols = {col.column_name: col for col in self.columns}
target_col = cols[column_name]
tp = self.get_template_processor()
qry = (
select([target_col.get_sqla_col()])
.select_from(self.get_from_clause(tp))
.distinct()
)
if limit:
qry = qry.limit(limit)
if self.fetch_values_predicate:
tp = self.get_template_processor()
qry = qry.where(tp.process_template(self.fetch_values_predicate))
engine = self.database.get_sqla_engine()
sql = '{}'.format(
qry.compile(engine, compile_kwargs={'literal_binds': True}),
)
sql = self.mutate_query_from_config(sql)
df = pd.read_sql_query(sql=sql, con=engine)
return [row[0] for row in df.to_records(index=False)]
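
The generated statement is a plain SELECT DISTINCT with an optional limit. A sketch with hypothetical table and column names, compiled against an in-memory SQLite engine; select([...]) follows the SQLAlchemy 1.x API used throughout this code:

import sqlalchemy as sa

tbl = sa.table('users', sa.column('country'))
qry = sa.select([sa.column('country')]).select_from(tbl).distinct().limit(10)
engine = sa.create_engine('sqlite://')
sql = str(qry.compile(engine, compile_kwargs={'literal_binds': True}))
print(sql)  # roughly: SELECT DISTINCT country FROM users LIMIT 10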
|
Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context
def mutate_query_from_config(self, sql):
"""Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context"""
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
username = utils.get_username()
sql = SQL_QUERY_MUTATOR(sql, username, security_manager, self.database)
return sql
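
A minimal SQL_QUERY_MUTATOR, as it might appear in superset_config.py, that just prepends a traceability comment; only the hook signature is taken from the call above:

def SQL_QUERY_MUTATOR(sql, username, security_manager, database):
    # prepend a comment identifying the user who triggered the query
    return '-- run by {}\n{}'.format(username, sql)

print(SQL_QUERY_MUTATOR('SELECT 1', 'alice', None, None))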
|
Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict cols: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
def adhoc_metric_to_sqla(self, metric, cols):
"""
Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict cols: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
"""
expression_type = metric.get('expressionType')
label = utils.get_metric_name(metric)
if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']:
column_name = metric.get('column').get('column_name')
table_column = cols.get(column_name)
if table_column:
sqla_column = table_column.get_sqla_col()
else:
sqla_column = column(column_name)
sqla_metric = self.sqla_aggregations[metric.get('aggregate')](sqla_column)
elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']:
sqla_metric = literal_column(metric.get('sqlExpression'))
else:
return None
return self.make_sqla_column_compatible(sqla_metric, label)
|
Querying any sqla table from this common interface
def get_sqla_query( # sqla
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=15,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None,
inner_to_dttm=None,
orderby=None,
extras=None,
columns=None,
order_desc=True,
prequeries=None,
is_prequery=False,
):
"""Querying any sqla table from this common interface"""
template_kwargs = {
'from_dttm': from_dttm,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'to_dttm': to_dttm,
'filter': filter,
'columns': {col.column_name: col for col in self.columns},
}
template_kwargs.update(self.template_params_dict)
template_processor = self.get_template_processor(**template_kwargs)
db_engine_spec = self.database.db_engine_spec
orderby = orderby or []
# For backward compatibility
if granularity not in self.dttm_cols:
granularity = self.main_dttm_col
# Database spec supports join-free timeslot grouping
time_groupby_inline = db_engine_spec.time_groupby_inline
cols = {col.column_name: col for col in self.columns}
metrics_dict = {m.metric_name: m for m in self.metrics}
if not granularity and is_timeseries:
raise Exception(_(
'Datetime column not provided as part table configuration '
'and is required by this type of chart'))
if not groupby and not metrics and not columns:
raise Exception(_('Empty query?'))
metrics_exprs = []
for m in metrics:
if utils.is_adhoc_metric(m):
metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
elif m in metrics_dict:
metrics_exprs.append(metrics_dict.get(m).get_sqla_col())
else:
raise Exception(_("Metric '{}' is not valid".format(m)))
if metrics_exprs:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr, label = literal_column('COUNT(*)'), 'ccount'
main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
select_exprs = []
groupby_exprs_sans_timestamp = OrderedDict()
if groupby:
select_exprs = []
for s in groupby:
if s in cols:
outer = cols[s].get_sqla_col()
else:
outer = literal_column(f'({s})')
outer = self.make_sqla_column_compatible(outer, s)
groupby_exprs_sans_timestamp[outer.name] = outer
select_exprs.append(outer)
elif columns:
for s in columns:
select_exprs.append(
cols[s].get_sqla_col() if s in cols else
self.make_sqla_column_compatible(literal_column(s)))
metrics_exprs = []
groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
if granularity:
dttm_col = cols[granularity]
time_grain = extras.get('time_grain_sqla')
time_filters = []
if is_timeseries:
timestamp = dttm_col.get_timestamp_expression(time_grain)
select_exprs += [timestamp]
groupby_exprs_with_timestamp[timestamp.name] = timestamp
# Use main dttm column to support index with secondary dttm columns
if db_engine_spec.time_secondary_columns and \
self.main_dttm_col in self.dttm_cols and \
self.main_dttm_col != dttm_col.column_name:
time_filters.append(cols[self.main_dttm_col].
get_time_filter(from_dttm, to_dttm))
time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))
select_exprs += metrics_exprs
labels_expected = [c._df_label_expected for c in select_exprs]
select_exprs = db_engine_spec.make_select_compatible(
groupby_exprs_with_timestamp.values(),
select_exprs)
qry = sa.select(select_exprs)
tbl = self.get_from_clause(template_processor)
if not columns:
qry = qry.group_by(*groupby_exprs_with_timestamp.values())
where_clause_and = []
having_clause_and = []
for flt in filter:
if not all([flt.get(s) for s in ['col', 'op']]):
continue
col = flt['col']
op = flt['op']
col_obj = cols.get(col)
if col_obj:
is_list_target = op in ('in', 'not in')
eq = self.filter_values_handler(
flt.get('val'),
target_column_is_numeric=col_obj.is_num,
is_list_target=is_list_target)
if op in ('in', 'not in'):
cond = col_obj.get_sqla_col().in_(eq)
if '<NULL>' in eq:
cond = or_(cond, col_obj.get_sqla_col() == None) # noqa
if op == 'not in':
cond = ~cond
where_clause_and.append(cond)
else:
if col_obj.is_num:
eq = utils.string_to_num(flt['val'])
if op == '==':
where_clause_and.append(col_obj.get_sqla_col() == eq)
elif op == '!=':
where_clause_and.append(col_obj.get_sqla_col() != eq)
elif op == '>':
where_clause_and.append(col_obj.get_sqla_col() > eq)
elif op == '<':
where_clause_and.append(col_obj.get_sqla_col() < eq)
elif op == '>=':
where_clause_and.append(col_obj.get_sqla_col() >= eq)
elif op == '<=':
where_clause_and.append(col_obj.get_sqla_col() <= eq)
elif op == 'LIKE':
where_clause_and.append(col_obj.get_sqla_col().like(eq))
elif op == 'IS NULL':
where_clause_and.append(col_obj.get_sqla_col() == None) # noqa
elif op == 'IS NOT NULL':
where_clause_and.append(
col_obj.get_sqla_col() != None) # noqa
if extras:
where = extras.get('where')
if where:
where = template_processor.process_template(where)
where_clause_and += [sa.text('({})'.format(where))]
having = extras.get('having')
if having:
having = template_processor.process_template(having)
having_clause_and += [sa.text('({})'.format(having))]
if granularity:
qry = qry.where(and_(*(time_filters + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
if not orderby and not columns:
orderby = [(main_metric_expr, not order_desc)]
for col, ascending in orderby:
direction = asc if ascending else desc
if utils.is_adhoc_metric(col):
col = self.adhoc_metric_to_sqla(col, cols)
qry = qry.order_by(direction(col))
if row_limit:
qry = qry.limit(row_limit)
if is_timeseries and \
timeseries_limit and groupby and not time_groupby_inline:
if self.database.db_engine_spec.inner_joins:
# some sql dialects require order by expressions
# to also be in the select clause -- others, e.g. vertica,
# require a unique inner alias
inner_main_metric_expr = self.make_sqla_column_compatible(
main_metric_expr, 'mme_inner__')
inner_groupby_exprs = []
inner_select_exprs = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
inner = self.make_sqla_column_compatible(gby_obj, gby_name + '__')
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
inner_select_exprs += [inner_main_metric_expr]
subq = select(inner_select_exprs).select_from(tbl)
inner_time_filter = dttm_col.get_time_filter(
inner_from_dttm or from_dttm,
inner_to_dttm or to_dttm,
)
subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
subq = subq.group_by(*inner_groupby_exprs)
ob = inner_main_metric_expr
if timeseries_limit_metric:
ob = self._get_timeseries_orderby(
timeseries_limit_metric,
metrics_dict,
cols,
)
direction = desc if order_desc else asc
subq = subq.order_by(direction(ob))
subq = subq.limit(timeseries_limit)
on_clause = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
# in this case the column name, not the alias, needs to be
# conditionally mutated, as it refers to the column alias in
# the inner query
col_name = db_engine_spec.make_label_compatible(gby_name + '__')
on_clause.append(gby_obj == column(col_name))
tbl = tbl.join(subq.alias(), and_(*on_clause))
else:
if timeseries_limit_metric:
orderby = [(
self._get_timeseries_orderby(
timeseries_limit_metric,
metrics_dict,
cols,
),
False,
)]
# run subquery to get top groups
subquery_obj = {
'prequeries': prequeries,
'is_prequery': True,
'is_timeseries': False,
'row_limit': timeseries_limit,
'groupby': groupby,
'metrics': metrics,
'granularity': granularity,
'from_dttm': inner_from_dttm or from_dttm,
'to_dttm': inner_to_dttm or to_dttm,
'filter': filter,
'orderby': orderby,
'extras': extras,
'columns': columns,
'order_desc': True,
}
result = self.query(subquery_obj)
dimensions = [
c for c in result.df.columns
if c not in metrics and c in groupby_exprs_sans_timestamp
]
top_groups = self._get_top_groups(result.df,
dimensions,
groupby_exprs_sans_timestamp)
qry = qry.where(top_groups)
return SqlaQuery(sqla_query=qry.select_from(tbl),
labels_expected=labels_expected)
|
Fetches the metadata for the table and merges it in
def fetch_metadata(self):
"""Fetches the metadata for the table and merges it in"""
try:
table = self.get_sqla_table_object()
except Exception as e:
logging.exception(e)
raise Exception(_(
"Table [{}] doesn't seem to exist in the specified database, "
"couldn't fetch column information").format(self.table_name))
M = SqlMetric # noqa
metrics = []
any_date_col = None
db_engine_spec = self.database.db_engine_spec
db_dialect = self.database.get_dialect()
dbcols = (
db.session.query(TableColumn)
.filter(TableColumn.table == self)
.filter(or_(TableColumn.column_name == col.name
for col in table.columns)))
dbcols = {dbcol.column_name: dbcol for dbcol in dbcols}
for col in table.columns:
try:
datatype = col.type.compile(dialect=db_dialect).upper()
except Exception as e:
datatype = 'UNKNOWN'
logging.error(
'Unrecognized data type in {}.{}'.format(table, col.name))
logging.exception(e)
dbcol = dbcols.get(col.name, None)
if not dbcol:
dbcol = TableColumn(column_name=col.name, type=datatype)
dbcol.sum = dbcol.is_num
dbcol.avg = dbcol.is_num
dbcol.is_dttm = dbcol.is_time
db_engine_spec.alter_new_orm_column(dbcol)
else:
dbcol.type = datatype
dbcol.groupby = True
dbcol.filterable = True
self.columns.append(dbcol)
if not any_date_col and dbcol.is_time:
any_date_col = col.name
metrics.append(M(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
expression='COUNT(*)',
))
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
self.add_missing_metrics(metrics)
db.session.merge(self)
db.session.commit()
|
Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they exist.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copied over.
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they exist.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copied over.
"""
def lookup_sqlatable(table):
return db.session.query(SqlaTable).join(Database).filter(
SqlaTable.table_name == table.table_name,
SqlaTable.schema == table.schema,
Database.id == table.database_id,
).first()
def lookup_database(table):
return db.session.query(Database).filter_by(
database_name=table.params_dict['database_name']).one()
return import_datasource.import_datasource(
db.session, i_datasource, lookup_database, lookup_sqlatable,
import_time)
|
Loading lat/long data from a csv file in the repo
def load_long_lat_data():
"""Loading lat/long data from a csv file in the repo"""
data = get_example_data('san_francisco.csv.gz', make_bytes=True)
pdf = pd.read_csv(data, encoding='utf-8')
start = datetime.datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0)
pdf['datetime'] = [
start + datetime.timedelta(hours=i * 24 / (len(pdf) - 1))
for i in range(len(pdf))
]
pdf['occupancy'] = [random.randint(1, 6) for _ in range(len(pdf))]
pdf['radius_miles'] = [random.uniform(1, 3) for _ in range(len(pdf))]
pdf['geohash'] = pdf[['LAT', 'LON']].apply(
lambda x: geohash.encode(*x), axis=1)
pdf['delimited'] = pdf['LAT'].map(str).str.cat(pdf['LON'].map(str), sep=',')
pdf.to_sql( # pylint: disable=no-member
'long_lat',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'longitude': Float(),
'latitude': Float(),
'number': Float(),
'street': String(100),
'unit': String(10),
'city': String(50),
'district': String(50),
'region': String(50),
'postcode': Float(),
'id': String(100),
'datetime': DateTime(),
'occupancy': Float(),
'radius_miles': Float(),
'geohash': String(12),
'delimited': String(60),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table reference')
obj = db.session.query(TBL).filter_by(table_name='long_lat').first()
if not obj:
obj = TBL(table_name='long_lat')
obj.main_dttm_col = 'datetime'
obj.database = utils.get_or_create_main_db()
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
'granularity_sqla': 'day',
'since': '2014-01-01',
'until': 'now',
'where': '',
'viz_type': 'mapbox',
'all_columns_x': 'LON',
'all_columns_y': 'LAT',
'mapbox_style': 'mapbox://styles/mapbox/light-v9',
'all_columns': ['occupancy'],
'row_limit': 500000,
}
print('Creating a slice')
slc = Slice(
slice_name='Mapbox Long/Lat',
viz_type='mapbox',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
|
Gets column info from the source system
def external_metadata(self, datasource_type=None, datasource_id=None):
"""Gets column info from the source system"""
if datasource_type == 'druid':
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
elif datasource_type == 'table':
database = (
db.session
.query(Database)
.filter_by(id=request.args.get('db_id'))
.one()
)
Table = ConnectorRegistry.sources['table']
datasource = Table(
database=database,
table_name=request.args.get('table_name'),
schema=request.args.get('schema') or None,
)
external_metadata = datasource.external_metadata()
return self.json_response(external_metadata)
|
Returns a list of non-empty values or None
def filter_not_empty_values(value):
"""Returns a list of non empty values or None"""
if not value:
return None
data = [x for x in value if x]
if not data:
return None
return data
|
If the user has access to the database or all datasources
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is able to upload csv without specifying schema name
b) if database supports schema
user is able to upload csv to any schema
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and upload will fail
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
elif the user does not have access to the database or all datasources
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is unable to upload csv
b) if database supports schema
user is unable to upload csv
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and user is unable to upload csv
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
def at_least_one_schema_is_allowed(database):
"""
If the user has access to the database or all datasources
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is able to upload csv without specifying schema name
b) if database supports schema
user is able to upload csv to any schema
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and upload will fail
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
elif the user does not have access to the database or all datasources
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is unable to upload csv
b) if database supports schema
user is unable to upload csv
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and user is unable to upload csv
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
"""
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return True
schemas = database.get_schema_access_for_csv_upload()
if (schemas and
security_manager.schemas_accessible_by_user(
database, schemas, False)):
return True
return False
|
Filter queries to only those owned by current user if
can_only_access_owned_queries permission is set.
:returns: query
def apply(
self,
query: BaseQuery,
func: Callable) -> BaseQuery:
"""
Filter queries to only those owned by current user if
can_only_access_owned_queries permission is set.
:returns: query
"""
if security_manager.can_only_access_owned_queries():
query = (
query
.filter(Query.user_id == g.user.get_user_id())
)
return query
|
Simple hack to redirect to explore view after saving
def edit(self, pk):
"""Simple hack to redirect to explore view after saving"""
resp = super(TableModelView, self).edit(pk)
if isinstance(resp, str):
return resp
return redirect('/superset/explore/table/{}/'.format(pk))
|
Get/cache a language pack
Returns the language pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
def get_language_pack(locale):
"""Get/cache a language pack
Returns the language pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
"""
pack = ALL_LANGUAGE_PACKS.get(locale)
if not pack:
filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale)
try:
with open(filename) as f:
pack = json.load(f)
ALL_LANGUAGE_PACKS[locale] = pack
except Exception:
# Assuming english, client side falls back on english
pass
return pack
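A hedged usage sketch of the caching behavior above; the locale codes and translation file contents depend on the deployment, so the specifics are assumptions.
# Illustrative calls (locale codes and file contents are deployment-specific)
pack = get_language_pack('fr')     # first call loads <DIR>/fr/LC_MESSAGES/messages.json
cached = get_language_pack('fr')   # second call is served from ALL_LANGUAGE_PACKS
missing = get_language_pack('xx')  # unknown locale returns None; client falls back to English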
|
Build `form_data` for chart GET request from dashboard's `default_filters`.
When a dashboard has `default_filters` they need to be added as extra
filters in the GET request for charts.
def get_form_data(chart_id, dashboard=None):
"""
Build `form_data` for chart GET request from dashboard's `default_filters`.
When a dashboard has `default_filters` they need to be added as extra
filters in the GET request for charts.
"""
form_data = {'slice_id': chart_id}
if dashboard is None or not dashboard.json_metadata:
return form_data
json_metadata = json.loads(dashboard.json_metadata)
# do not apply filters if chart is immune to them
if chart_id in json_metadata.get('filter_immune_slices', []):
return form_data
default_filters = json.loads(json_metadata.get('default_filters', 'null'))
if not default_filters:
return form_data
# are some of the fields in the chart immune to filters?
filter_immune_slice_fields = json_metadata.get('filter_immune_slice_fields', {})
immune_fields = filter_immune_slice_fields.get(str(chart_id), [])
extra_filters = []
for filters in default_filters.values():
for col, val in filters.items():
if col not in immune_fields:
extra_filters.append({'col': col, 'op': 'in', 'val': val})
if extra_filters:
form_data['extra_filters'] = extra_filters
return form_data
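A small illustration of how `default_filters` and the immune lists interact. `SimpleNamespace` merely stands in for a real dashboard model here, since only the `json_metadata` attribute is read; all ids and filter values are made up.
import json
from types import SimpleNamespace

# Hypothetical dashboard metadata for illustration only
dashboard = SimpleNamespace(json_metadata=json.dumps({
    'filter_immune_slices': [10],
    'filter_immune_slice_fields': {'42': ['region']},
    'default_filters': json.dumps({'99': {'region': ['Europe'], 'year': [2014]}}),
}))
get_form_data(10, dashboard)  # {'slice_id': 10} -- chart 10 is immune to all filters
get_form_data(42, dashboard)
# {'slice_id': 42, 'extra_filters': [{'col': 'year', 'op': 'in', 'val': [2014]}]}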
|
Return external URL for warming up a given chart/table cache.
def get_url(params):
"""Return external URL for warming up a given chart/table cache."""
baseurl = 'http://{SUPERSET_WEBSERVER_ADDRESS}:{SUPERSET_WEBSERVER_PORT}/'.format(
**app.config)
with app.test_request_context():
return urllib.parse.urljoin(
baseurl,
url_for('Superset.explore_json', **params),
)
|
Warm up cache.
This task periodically hits charts to warm up the cache.
def cache_warmup(strategy_name, *args, **kwargs):
"""
Warm up cache.
This task periodically hits charts to warm up the cache.
"""
logger.info('Loading strategy')
class_ = None
for class_ in strategies:
if class_.name == strategy_name:
break
else:
message = f'No strategy {strategy_name} found!'
logger.error(message)
return message
logger.info(f'Loading {class_.__name__}')
try:
strategy = class_(*args, **kwargs)
logger.info('Success!')
except TypeError:
message = 'Error loading strategy!'
logger.exception(message)
return message
results = {'success': [], 'errors': []}
for url in strategy.get_urls():
try:
logger.info(f'Fetching {url}')
requests.get(url)
results['success'].append(url)
except RequestException:
logger.exception('Error warming up cache!')
results['errors'].append(url)
return results
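The task above only relies on a strategy exposing a `name` attribute and a `get_urls()` method. The class below is a hypothetical sketch of that contract, not one of the shipped strategies.
class DummyStrategy:
    """Hypothetical warm-up strategy: returns a fixed list of chart URLs."""
    name = 'dummy'

    def __init__(self, urls=None):
        self.urls = urls or []

    def get_urls(self):
        return self.urls

# Hypothetical registration: append it to the `strategies` list iterated above,
# then trigger the task by name, e.g. cache_warmup('dummy', urls=['http://...'])
# (or .delay(...) when dispatched through Celery).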
|
Mocked. Retrieve the logs produced by the execution of the query.
Can be called multiple times to fetch the logs produced after
the previous call.
:returns: list<str>
:raises: ``ProgrammingError`` when no query has been started
.. note::
This is not a part of DB-API.
def fetch_logs(self, max_rows=1024,
orientation=None):
"""Mocked. Retrieve the logs produced by the execution of the query.
Can be called multiple times to fetch the logs produced after
the previous call.
:returns: list<str>
:raises: ``ProgrammingError`` when no query has been started
.. note::
This is not a part of DB-API.
"""
from pyhive import hive
from TCLIService import ttypes
from thrift import Thrift
orientation = orientation or ttypes.TFetchOrientation.FETCH_NEXT
try:
req = ttypes.TGetLogReq(operationHandle=self._operationHandle)
logs = self._connection.client.GetLog(req).log
return logs
# raised if Hive is used
except (ttypes.TApplicationException,
Thrift.TApplicationException):
if self._state == self._STATE_NONE:
raise hive.ProgrammingError('No query yet')
logs = []
while True:
req = ttypes.TFetchResultsReq(
operationHandle=self._operationHandle,
orientation=ttypes.TFetchOrientation.FETCH_NEXT,
maxRows=self.arraysize,
fetchType=1, # 0: results, 1: logs
)
response = self._connection.client.FetchResults(req)
hive._check_status(response)
assert not response.results.rows, \
'expected data in columnar format'
assert len(response.results.columns) == 1, response.results.columns
new_logs = hive._unwrap_column(response.results.columns[0])
logs += new_logs
if not new_logs:
break
return '\n'.join(logs)
|
Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh(ds_refresh, merge_flag, refreshAll)
|
Fetches metadata for the specified datasources and
merges to the Superset database
def refresh(self, datasource_names, merge_flag, refreshAll):
"""
Fetches metadata for the specified datasources and
merges to the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(DruidDatasource.cluster_name == self.cluster_name)
.filter(DruidDatasource.datasource_name.in_(datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
_('Adding new datasource [{}]').format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
_('Refreshing datasource [{}]').format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
# Prepare multithreaded execution
pool = ThreadPool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
if cols:
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(cols.keys()))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
col_obj.type = cols[col]['type']
col_obj.datasource = datasource
if col_obj.type == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
datasource.refresh_metrics()
session.commit()
|
Refresh metrics based on the column metadata
def refresh_metrics(self):
"""Refresh metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == self.datasource_id)
.filter(DruidMetric.metric_name.in_(metrics.keys()))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
dbmetric = dbmetrics.get(metric.metric_name)
if dbmetric:
for attr in ['json', 'metric_type']:
setattr(dbmetric, attr, getattr(metric, attr))
else:
with db.session.no_autoflush:
metric.datasource_id = self.datasource_id
db.session.add(metric)
|
Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they already exist.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copied over.
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
Metrics and columns and datasource will be overridden if exists.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copies over.
"""
def lookup_datasource(d):
return db.session.query(DruidDatasource).filter(
DruidDatasource.datasource_name == d.datasource_name,
DruidCluster.cluster_name == d.cluster_name,
).first()
def lookup_cluster(d):
return db.session.query(DruidCluster).filter_by(
cluster_name=d.cluster_name).one()
return import_datasource.import_datasource(
db.session, i_datasource, lookup_cluster, lookup_datasource,
import_time)
|
Merges the ds config from druid_config into one stored in the db.
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owners=[user],
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.in_(
spec['name'] for spec in druid_config['metrics_spec']
))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
|
For a metric specified as `postagg` returns the
kind of post aggregation for pydruid.
def get_post_agg(mconf):
"""
For a metric specified as `postagg` returns the
kind of post aggregation for pydruid.
"""
if mconf.get('type') == 'javascript':
return JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
return Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
return Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
return Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
return Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
return HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
return Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
return CustomPostAggregator(
mconf.get('name', ''),
mconf)
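Two illustrative `postagg` configurations and the pydruid objects they map to; the metric names are made up and the pydruid classes are assumed to be the ones imported by this module.
# Hypothetical metric configs
quantile_conf = {'type': 'quantile', 'name': 'p95_latency', 'probability': 0.95}
arithmetic_conf = {
    'type': 'arithmetic',
    'name': 'avg_price',
    'fn': '/',
    'fields': ['total_price', 'row_count'],
}
get_post_agg(quantile_conf)    # -> Quantile('p95_latency', 0.95)
get_post_agg(arithmetic_conf)  # -> Postaggregator('/', ['total_price', 'row_count'], 'avg_price')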
|
Return a list of metrics that are post aggregations
def find_postaggs_for(postagg_names, metrics_dict):
"""Return a list of metrics that are post aggregations"""
postagg_metrics = [
metrics_dict[name] for name in postagg_names
if metrics_dict[name].metric_type == POST_AGG_TYPE
]
# Remove post aggregations that were found
for postagg in postagg_metrics:
postagg_names.remove(postagg.metric_name)
return postagg_metrics
|
Retrieve some values for the given column
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
logging.info(
'Getting values for column [{}] limited to [{}]'
.format(column_name, limit))
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
|
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations
def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]):
"""
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations
"""
aggregations = OrderedDict()
invalid_metric_names = []
for metric_name in saved_metrics:
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if metric.metric_type == POST_AGG_TYPE:
invalid_metric_names.append(metric_name)
else:
aggregations[metric_name] = metric.json_obj
else:
invalid_metric_names.append(metric_name)
if len(invalid_metric_names) > 0:
raise SupersetException(
_('Metric(s) {} must be aggregations.').format(invalid_metric_names))
for adhoc_metric in adhoc_metrics:
aggregations[adhoc_metric['label']] = {
'fieldName': adhoc_metric['column']['column_name'],
'fieldNames': [adhoc_metric['column']['column_name']],
'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
'name': adhoc_metric['label'],
}
return aggregations
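A sketch of the happy path and the error path, using stand-in metric objects; only `.metric_type` and `.json_obj` are read from them, so `SimpleNamespace` is enough for illustration, and the module-level POST_AGG_TYPE constant used above is assumed to be in scope.
from types import SimpleNamespace

metrics_dict = {
    'count': SimpleNamespace(metric_type='count',
                             json_obj={'type': 'count', 'name': 'count'}),
    'p95': SimpleNamespace(metric_type=POST_AGG_TYPE, json_obj={}),
}
get_aggregations(metrics_dict, ['count'])
# -> OrderedDict([('count', {'type': 'count', 'name': 'count'})])
# get_aggregations(metrics_dict, ['p95']) raises SupersetException, because a
# post-aggregation cannot be used where an aggregation is expected.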
|
Replace dimensions specs with their `dimension`
values, and ignore those without
def _dimensions_to_values(dimensions):
"""
Replace dimensions specs with their `dimension`
values, and ignore those without
"""
values = []
for dimension in dimensions:
if isinstance(dimension, dict):
if 'extractionFn' in dimension:
values.append(dimension)
elif 'dimension' in dimension:
values.append(dimension['dimension'])
else:
values.append(dimension)
return values
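An illustrative call: plain dimension names pass through, dimensionSpecs collapse to their `dimension` value, and specs carrying an `extractionFn` are kept whole. The dimension names are made up.
dims = [
    'country',
    {'type': 'default', 'dimension': 'region', 'outputName': 'region'},
    {'type': 'extraction', 'dimension': 'ts', 'extractionFn': {'type': 'timeFormat'}},
]
_dimensions_to_values(dims)
# -> ['country', 'region',
#     {'type': 'extraction', 'dimension': 'ts', 'extractionFn': {'type': 'timeFormat'}}]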
|
Runs a query against Druid and returns a dataframe.
def run_query( # noqa / druid
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
columns=None, phase=2, client=None,
order_desc=True,
prequeries=None,
is_prequery=False,
):
"""Runs a query against Druid and returns a dataframe.
"""
# TODO refactor into using a TBD Query object
client = client or self.cluster.get_pydruid_client()
row_limit = row_limit or conf.get('ROW_LIMIT')
if not is_timeseries:
granularity = 'all'
if granularity == 'all':
phase = 1
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None
query_str = ''
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
if (
self.cluster and
LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0')
):
for metric in metrics:
self.sanitize_metric_object(metric)
self.sanitize_metric_object(timeseries_limit_metric)
aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics,
metrics_dict)
self.check_restricted_metrics(aggregations)
# the dimensions list with dimensionSpecs expanded
dimensions = self.get_dimensions(groupby, columns_dict)
extras = extras or {}
qry = dict(
datasource=self.datasource_name,
dimensions=dimensions,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity,
timezone=timezone,
origin=extras.get('druid_time_origin'),
),
post_aggregations=post_aggs,
intervals=self.intervals_from_dttms(from_dttm, to_dttm),
)
filters = DruidDatasource.get_filters(filter, self.num_cols, columns_dict)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
order_direction = 'descending' if order_desc else 'ascending'
if columns:
columns.append('__time')
del qry['post_aggregations']
del qry['aggregations']
qry['dimensions'] = columns
qry['metrics'] = []
qry['granularity'] = 'all'
qry['limit'] = row_limit
client.scan(**qry)
elif len(groupby) == 0 and not having_filters:
logging.info('Running timeseries query for no groupby values')
del qry['dimensions']
client.timeseries(**qry)
elif (
not having_filters and
len(groupby) == 1 and
order_desc
):
dim = list(qry.get('dimensions'))[0]
logging.info('Running two-phase topn query for dimension [{}]'.format(dim))
pre_qry = deepcopy(qry)
if timeseries_limit_metric:
order_by = utils.get_metric_name(timeseries_limit_metric)
aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
[timeseries_limit_metric],
metrics_dict)
if phase == 1:
pre_qry['aggregations'].update(aggs_dict)
pre_qry['post_aggregations'].update(post_aggs_dict)
else:
pre_qry['aggregations'] = aggs_dict
pre_qry['post_aggregations'] = post_aggs_dict
else:
agg_keys = qry['aggregations'].keys()
order_by = list(agg_keys)[0] if agg_keys else None
# Limit on the number of timeseries, doing a two-phase query
pre_qry['granularity'] = 'all'
pre_qry['threshold'] = min(row_limit,
timeseries_limit or row_limit)
pre_qry['metric'] = order_by
pre_qry['dimension'] = self._dimensions_to_values(qry.get('dimensions'))[0]
del pre_qry['dimensions']
client.topn(**pre_qry)
logging.info('Phase 1 Complete')
if phase == 2:
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
[pre_qry['dimension']],
filters)
qry['threshold'] = timeseries_limit or 1000
if row_limit and granularity == 'all':
qry['threshold'] = row_limit
qry['dimension'] = dim
del qry['dimensions']
qry['metric'] = list(qry['aggregations'].keys())[0]
client.topn(**qry)
logging.info('Phase 2 Complete')
elif len(groupby) > 0 or having_filters:
# If grouping on multiple fields or using a having filter
# we have to force a groupby query
logging.info('Running groupby query for dimensions [{}]'.format(dimensions))
if timeseries_limit and is_timeseries:
logging.info('Running two-phase query for timeseries')
pre_qry = deepcopy(qry)
pre_qry_dims = self._dimensions_to_values(qry['dimensions'])
# Can't use set on an array with dicts
# Use set with non-dict items only
non_dict_dims = list(
set([x for x in pre_qry_dims if not isinstance(x, dict)]),
)
dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]
pre_qry['dimensions'] = non_dict_dims + dict_dims
order_by = None
if metrics:
order_by = utils.get_metric_name(metrics[0])
else:
order_by = pre_qry_dims[0]
if timeseries_limit_metric:
order_by = utils.get_metric_name(timeseries_limit_metric)
aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
[timeseries_limit_metric],
metrics_dict)
if phase == 1:
pre_qry['aggregations'].update(aggs_dict)
pre_qry['post_aggregations'].update(post_aggs_dict)
else:
pre_qry['aggregations'] = aggs_dict
pre_qry['post_aggregations'] = post_aggs_dict
# Limit on the number of timeseries, doing a two-phase query
pre_qry['granularity'] = 'all'
pre_qry['limit_spec'] = {
'type': 'default',
'limit': min(timeseries_limit, row_limit),
'intervals': self.intervals_from_dttms(
inner_from_dttm, inner_to_dttm),
'columns': [{
'dimension': order_by,
'direction': order_direction,
}],
}
client.groupby(**pre_qry)
logging.info('Phase 1 Complete')
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
pre_qry['dimensions'],
filters,
)
qry['limit_spec'] = None
if row_limit:
dimension_values = self._dimensions_to_values(dimensions)
qry['limit_spec'] = {
'type': 'default',
'limit': row_limit,
'columns': [{
'dimension': (
utils.get_metric_name(
metrics[0],
) if metrics else dimension_values[0]
),
'direction': order_direction,
}],
}
client.groupby(**qry)
logging.info('Query Complete')
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
return query_str
|
Converting all GROUPBY columns to strings
When grouping by a numeric (say FLOAT) column, pydruid returns
strings in the dataframe. This creates issues downstream related
to having mixed types in the dataframe
Here we replace None with <NULL> and make the whole series a
str instead of an object.
def homogenize_types(df, groupby_cols):
"""Converting all GROUPBY columns to strings
When grouping by a numeric (say FLOAT) column, pydruid returns
strings in the dataframe. This creates issues downstream related
to having mixed types in the dataframe
Here we replace None with <NULL> and make the whole series a
str instead of an object.
"""
for col in groupby_cols:
df[col] = df[col].fillna('<NULL>').astype('unicode')
return df
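A quick toy-dataframe sketch of the effect: numeric group-by values and NaNs all end up as strings, so each group-by column carries a single type. The data is invented for illustration.
import pandas as pd

df = pd.DataFrame({'price_bucket': [1.0, None, 2.5], 'count': [10, 20, 30]})
df = homogenize_types(df, groupby_cols=['price_bucket'])
df['price_bucket'].tolist()  # roughly ['1.0', '<NULL>', '2.5'] -- all str, NaN replaced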
|
Given Superset filter data structure, returns pydruid Filter(s)
def get_filters(cls, raw_filters, num_cols, columns_dict): # noqa
"""Given Superset filter data structure, returns pydruid Filter(s)"""
filters = None
for flt in raw_filters:
col = flt.get('col')
op = flt.get('op')
eq = flt.get('val')
if (
not col or
not op or
(eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
continue
# Check if this dimension uses an extraction function
# If so, create the appropriate pydruid extraction object
column_def = columns_dict.get(col)
dim_spec = column_def.dimension_spec if column_def else None
extraction_fn = None
if dim_spec and 'extractionFn' in dim_spec:
(col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
cond = None
is_numeric_col = col in num_cols
is_list_target = op in ('in', 'not in')
eq = cls.filter_values_handler(
eq, is_list_target=is_list_target,
target_column_is_numeric=is_numeric_col)
# For these two ops, could have used Dimension,
# but it doesn't support extraction functions
if op == '==':
cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)
elif op == '!=':
cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
# if it uses an extraction fn, use the "in" operator
# as Dimension isn't supported
elif extraction_fn is not None:
cond = Filter(
dimension=col,
values=eq,
type='in',
extraction_function=extraction_fn,
)
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(
extraction_function=extraction_fn,
type='regex',
pattern=eq,
dimension=col,
)
# For the ops below, could have used pydruid's Bound,
# but it doesn't support extraction functions
elif op == '>=':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=eq,
upper=None,
alphaNumeric=is_numeric_col,
)
elif op == '<=':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=None,
upper=eq,
alphaNumeric=is_numeric_col,
)
elif op == '>':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
lowerStrict=True,
upperStrict=False,
dimension=col,
lower=eq,
upper=None,
alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
upperStrict=True,
lowerStrict=False,
dimension=col,
lower=None,
upper=eq,
alphaNumeric=is_numeric_col,
)
elif op == 'IS NULL':
cond = Dimension(col) == None # NOQA
elif op == 'IS NOT NULL':
cond = Dimension(col) != None # NOQA
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters
|
Get the environment variable or raise exception.
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = 'The environment variable {} was missing, abort...'\
.format(var_name)
raise EnvironmentError(error_msg)
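A brief usage sketch; the environment variable names are hypothetical.
import os

os.environ['SUPERSET_EXAMPLE_PORT'] = '8088'     # hypothetical variable
get_env_variable('SUPERSET_EXAMPLE_PORT')        # '8088'
get_env_variable('SUPERSET_EXAMPLE_DEBUG', '0')  # missing, falls back to '0'
# get_env_variable('SUPERSET_EXAMPLE_MISSING') raises EnvironmentError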
|
Returns datasource with columns and metrics.
def get_eager_datasource(cls, session, datasource_type, datasource_id):
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
)
|
Loading a dashboard featuring misc charts
def load_misc_dashboard():
"""Loading a dashboard featuring misc charts"""
print('Creating the dashboard')
db.session.expunge_all()
dash = db.session.query(Dash).filter_by(slug=DASH_SLUG).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-BkeVbh8ANQ": {
"children": [],
"id": "CHART-BkeVbh8ANQ",
"meta": {
"chartId": 4004,
"height": 34,
"sliceName": "Multi Line",
"width": 8
},
"type": "CHART"
},
"CHART-H1HYNzEANX": {
"children": [],
"id": "CHART-H1HYNzEANX",
"meta": {
"chartId": 3940,
"height": 50,
"sliceName": "Energy Sankey",
"width": 6
},
"type": "CHART"
},
"CHART-HJOYVMV0E7": {
"children": [],
"id": "CHART-HJOYVMV0E7",
"meta": {
"chartId": 3969,
"height": 63,
"sliceName": "Mapbox Long/Lat",
"width": 6
},
"type": "CHART"
},
"CHART-S1WYNz4AVX": {
"children": [],
"id": "CHART-S1WYNz4AVX",
"meta": {
"chartId": 3989,
"height": 25,
"sliceName": "Parallel Coordinates",
"width": 4
},
"type": "CHART"
},
"CHART-r19KVMNCE7": {
"children": [],
"id": "CHART-r19KVMNCE7",
"meta": {
"chartId": 3971,
"height": 34,
"sliceName": "Calendar Heatmap multiformat 0",
"width": 4
},
"type": "CHART"
},
"CHART-rJ4K4GV04Q": {
"children": [],
"id": "CHART-rJ4K4GV04Q",
"meta": {
"chartId": 3941,
"height": 63,
"sliceName": "Energy Force Layout",
"width": 6
},
"type": "CHART"
},
"CHART-rkgF4G4A4X": {
"children": [],
"id": "CHART-rkgF4G4A4X",
"meta": {
"chartId": 3970,
"height": 25,
"sliceName": "Birth in France by department in 2016",
"width": 8
},
"type": "CHART"
},
"CHART-rywK4GVR4X": {
"children": [],
"id": "CHART-rywK4GVR4X",
"meta": {
"chartId": 3942,
"height": 50,
"sliceName": "Heatmap",
"width": 6
},
"type": "CHART"
},
"COLUMN-ByUFVf40EQ": {
"children": [
"CHART-rywK4GVR4X",
"CHART-HJOYVMV0E7"
],
"id": "COLUMN-ByUFVf40EQ",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 6
},
"type": "COLUMN"
},
"COLUMN-rkmYVGN04Q": {
"children": [
"CHART-rJ4K4GV04Q",
"CHART-H1HYNzEANX"
],
"id": "COLUMN-rkmYVGN04Q",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 6
},
"type": "COLUMN"
},
"GRID_ID": {
"children": [
"ROW-SytNzNA4X",
"ROW-S1MK4M4A4X",
"ROW-HkFFEzVRVm"
],
"id": "GRID_ID",
"type": "GRID"
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {
"text": "Misc Charts"
},
"type": "HEADER"
},
"ROOT_ID": {
"children": [
"GRID_ID"
],
"id": "ROOT_ID",
"type": "ROOT"
},
"ROW-HkFFEzVRVm": {
"children": [
"CHART-r19KVMNCE7",
"CHART-BkeVbh8ANQ"
],
"id": "ROW-HkFFEzVRVm",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-S1MK4M4A4X": {
"children": [
"COLUMN-rkmYVGN04Q",
"COLUMN-ByUFVf40EQ"
],
"id": "ROW-S1MK4M4A4X",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-SytNzNA4X": {
"children": [
"CHART-rkgF4G4A4X",
"CHART-S1WYNz4AVX"
],
"id": "ROW-SytNzNA4X",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
slices = (
db.session
.query(Slice)
.filter(Slice.slice_name.in_(misc_dash_slices))
.all()
)
slices = sorted(slices, key=lambda x: x.id)
update_slice_ids(pos, slices)
dash.dashboard_title = 'Misc Charts'
dash.position_json = json.dumps(pos, indent=4)
dash.slug = DASH_SLUG
dash.slices = slices
db.session.merge(dash)
db.session.commit()
|
Loads the world bank health dataset, slices and a dashboard
def load_world_bank_health_n_pop():
"""Loads the world bank health dataset, slices and a dashboard"""
tbl_name = 'wb_health_population'
data = get_example_data('countries.json.gz')
pdf = pd.read_json(data)
pdf.columns = [col.replace('.', '_') for col in pdf.columns]
pdf.year = pd.to_datetime(pdf.year)
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=50,
dtype={
'year': DateTime(),
'country_code': String(3),
'country_name': String(255),
'region': String(255),
},
index=False)
print('Creating table [wb_health_population] reference')
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = utils.readfile(os.path.join(DATA_FOLDER, 'countries.md'))
tbl.main_dttm_col = 'year'
tbl.database = utils.get_or_create_main_db()
tbl.filter_select_enabled = True
metrics = [
'sum__SP_POP_TOTL', 'sum__SH_DYN_AIDS',
'sum__SP_RUR_TOTL_ZS', 'sum__SP_DYN_LE00_IN',
]
for m in metrics:
if not any(col.metric_name == m for col in tbl.metrics):
tbl.metrics.append(SqlMetric(
metric_name=m,
expression=f'{m[:3]}({m[5:]})',
))
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
defaults = {
'compare_lag': '10',
'compare_suffix': 'o10Y',
'limit': '25',
'granularity_sqla': 'year',
'groupby': [],
'metric': 'sum__SP_POP_TOTL',
'metrics': ['sum__SP_POP_TOTL'],
'row_limit': config.get('ROW_LIMIT'),
'since': '2014-01-01',
'until': '2014-01-02',
'time_range': '2014-01-01 : 2014-01-02',
'where': '',
'markup_type': 'markdown',
'country_fieldtype': 'cca3',
'secondary_metric': 'sum__SP_POP_TOTL',
'entity': 'country_code',
'show_bubbles': True,
}
print('Creating slices')
slices = [
Slice(
slice_name='Region Filter',
viz_type='filter_box',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='filter_box',
date_filter=False,
filter_configs=[
{
'asc': False,
'clearable': True,
'column': 'region',
'key': '2s98dfu',
'metric': 'sum__SP_POP_TOTL',
'multiple': True,
}, {
'asc': False,
'clearable': True,
'key': 'li3j2lk',
'column': 'country_name',
'metric': 'sum__SP_POP_TOTL',
'multiple': True,
},
])),
Slice(
slice_name="World's Population",
viz_type='big_number',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since='2000',
viz_type='big_number',
compare_lag='10',
metric='sum__SP_POP_TOTL',
compare_suffix='over 10Y')),
Slice(
slice_name='Most Populated Countries',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='table',
metrics=['sum__SP_POP_TOTL'],
groupby=['country_name'])),
Slice(
slice_name='Growth Rate',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line',
since='1960-01-01',
metrics=['sum__SP_POP_TOTL'],
num_period_compare='10',
groupby=['country_name'])),
Slice(
slice_name='% Rural',
viz_type='world_map',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='world_map',
metric='sum__SP_RUR_TOTL_ZS',
num_period_compare='10')),
Slice(
slice_name='Life Expectancy VS Rural %',
viz_type='bubble',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='bubble',
since='2011-01-01',
until='2011-01-02',
series='region',
limit=0,
entity='country_name',
x='sum__SP_RUR_TOTL_ZS',
y='sum__SP_DYN_LE00_IN',
size='sum__SP_POP_TOTL',
max_bubble_size='50',
filters=[{
'col': 'country_code',
'val': [
'TCA', 'MNP', 'DMA', 'MHL', 'MCO', 'SXM', 'CYM',
'TUV', 'IMY', 'KNA', 'ASM', 'ADO', 'AMA', 'PLW',
],
'op': 'not in'}],
)),
Slice(
slice_name='Rural Breakdown',
viz_type='sunburst',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='sunburst',
groupby=['region', 'country_name'],
secondary_metric='sum__SP_RUR_TOTL',
since='2011-01-01',
until='2011-01-01')),
Slice(
slice_name="World's Pop Growth",
viz_type='area',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since='1960-01-01',
until='now',
viz_type='area',
groupby=['region'])),
Slice(
slice_name='Box plot',
viz_type='box_plot',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since='1960-01-01',
until='now',
whisker_options='Min/max (no outliers)',
x_ticks_layout='staggered',
viz_type='box_plot',
groupby=['region'])),
Slice(
slice_name='Treemap',
viz_type='treemap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since='1960-01-01',
until='now',
viz_type='treemap',
metrics=['sum__SP_POP_TOTL'],
groupby=['region', 'country_code'])),
Slice(
slice_name='Parallel Coordinates',
viz_type='para',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since='2011-01-01',
until='2011-01-01',
viz_type='para',
limit=100,
metrics=[
'sum__SP_POP_TOTL',
'sum__SP_RUR_TOTL_ZS',
'sum__SH_DYN_AIDS'],
secondary_metric='sum__SP_POP_TOTL',
series='country_name')),
]
misc_dash_slices.add(slices[-1].slice_name)
for slc in slices:
merge_slice(slc)
print("Creating a World's Health Bank dashboard")
dash_name = "World's Bank Data"
slug = 'world_health'
dash = db.session.query(Dash).filter_by(slug=slug).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-36bfc934": {
"children": [],
"id": "CHART-36bfc934",
"meta": {
"chartId": 40,
"height": 25,
"sliceName": "Region Filter",
"width": 2
},
"type": "CHART"
},
"CHART-37982887": {
"children": [],
"id": "CHART-37982887",
"meta": {
"chartId": 41,
"height": 25,
"sliceName": "World's Population",
"width": 2
},
"type": "CHART"
},
"CHART-17e0f8d8": {
"children": [],
"id": "CHART-17e0f8d8",
"meta": {
"chartId": 42,
"height": 92,
"sliceName": "Most Populated Countries",
"width": 3
},
"type": "CHART"
},
"CHART-2ee52f30": {
"children": [],
"id": "CHART-2ee52f30",
"meta": {
"chartId": 43,
"height": 38,
"sliceName": "Growth Rate",
"width": 6
},
"type": "CHART"
},
"CHART-2d5b6871": {
"children": [],
"id": "CHART-2d5b6871",
"meta": {
"chartId": 44,
"height": 52,
"sliceName": "% Rural",
"width": 7
},
"type": "CHART"
},
"CHART-0fd0d252": {
"children": [],
"id": "CHART-0fd0d252",
"meta": {
"chartId": 45,
"height": 50,
"sliceName": "Life Expectancy VS Rural %",
"width": 8
},
"type": "CHART"
},
"CHART-97f4cb48": {
"children": [],
"id": "CHART-97f4cb48",
"meta": {
"chartId": 46,
"height": 38,
"sliceName": "Rural Breakdown",
"width": 3
},
"type": "CHART"
},
"CHART-b5e05d6f": {
"children": [],
"id": "CHART-b5e05d6f",
"meta": {
"chartId": 47,
"height": 50,
"sliceName": "World's Pop Growth",
"width": 4
},
"type": "CHART"
},
"CHART-e76e9f5f": {
"children": [],
"id": "CHART-e76e9f5f",
"meta": {
"chartId": 48,
"height": 50,
"sliceName": "Box plot",
"width": 4
},
"type": "CHART"
},
"CHART-a4808bba": {
"children": [],
"id": "CHART-a4808bba",
"meta": {
"chartId": 49,
"height": 50,
"sliceName": "Treemap",
"width": 8
},
"type": "CHART"
},
"COLUMN-071bbbad": {
"children": [
"ROW-1e064e3c",
"ROW-afdefba9"
],
"id": "COLUMN-071bbbad",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 9
},
"type": "COLUMN"
},
"COLUMN-fe3914b8": {
"children": [
"CHART-36bfc934",
"CHART-37982887"
],
"id": "COLUMN-fe3914b8",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 2
},
"type": "COLUMN"
},
"GRID_ID": {
"children": [
"ROW-46632bc2",
"ROW-3fa26c5d",
"ROW-812b3f13"
],
"id": "GRID_ID",
"type": "GRID"
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {
"text": "World's Bank Data"
},
"type": "HEADER"
},
"ROOT_ID": {
"children": [
"GRID_ID"
],
"id": "ROOT_ID",
"type": "ROOT"
},
"ROW-1e064e3c": {
"children": [
"COLUMN-fe3914b8",
"CHART-2d5b6871"
],
"id": "ROW-1e064e3c",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-3fa26c5d": {
"children": [
"CHART-b5e05d6f",
"CHART-0fd0d252"
],
"id": "ROW-3fa26c5d",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-46632bc2": {
"children": [
"COLUMN-071bbbad",
"CHART-17e0f8d8"
],
"id": "ROW-46632bc2",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-812b3f13": {
"children": [
"CHART-a4808bba",
"CHART-e76e9f5f"
],
"id": "ROW-812b3f13",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-afdefba9": {
"children": [
"CHART-2ee52f30",
"CHART-97f4cb48"
],
"id": "ROW-afdefba9",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
update_slice_ids(pos, slices)
dash.dashboard_title = dash_name
dash.position_json = json.dumps(pos, indent=4)
dash.slug = slug
dash.slices = slices[:-1]
db.session.merge(dash)
db.session.commit()
|
Loading data for map with country map
def load_country_map_data():
"""Loading data for map with country map"""
csv_bytes = get_example_data(
'birth_france_data_for_country_map.csv', is_gzip=False, make_bytes=True)
data = pd.read_csv(csv_bytes, encoding='utf-8')
data['dttm'] = datetime.datetime.now().date()
data.to_sql( # pylint: disable=no-member
'birth_france_by_region',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'DEPT_ID': String(10),
'2003': BigInteger,
'2004': BigInteger,
'2005': BigInteger,
'2006': BigInteger,
'2007': BigInteger,
'2008': BigInteger,
'2009': BigInteger,
'2010': BigInteger,
'2011': BigInteger,
'2012': BigInteger,
'2013': BigInteger,
'2014': BigInteger,
'dttm': Date(),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table reference')
obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first()
if not obj:
obj = TBL(table_name='birth_france_by_region')
obj.main_dttm_col = 'dttm'
obj.database = utils.get_or_create_main_db()
if not any(col.metric_name == 'avg__2004' for col in obj.metrics):
obj.metrics.append(SqlMetric(
metric_name='avg__2004',
expression='AVG(2004)',
))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
'granularity_sqla': '',
'since': '',
'until': '',
'where': '',
'viz_type': 'country_map',
'entity': 'DEPT_ID',
'metric': {
'expressionType': 'SIMPLE',
'column': {
'type': 'INT',
'column_name': '2004',
},
'aggregate': 'AVG',
'label': 'Boys',
'optionName': 'metric_112342',
},
'row_limit': 500000,
}
print('Creating a slice')
slc = Slice(
slice_name='Birth in France by department in 2016',
viz_type='country_map',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
|
Returns a list of SQL statements as strings, stripped
def get_statements(self):
"""Returns a list of SQL statements as strings, stripped"""
statements = []
for statement in self._parsed:
if statement:
sql = str(statement).strip(' \n;\t')
if sql:
statements.append(sql)
return statements
|
Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite: boolean, table table_name will be dropped if true
:return: string, create table as query
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite: boolean, table table_name will be dropped if true
:return: string, create table as query
"""
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql
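A standalone sketch of the same string assembly, since the surrounding SQL wrapper class isn't shown here; the table name and query are made up.
def as_create_table_sketch(sql, table_name, overwrite=False):
    # Mirrors the CTAS construction above, outside of the wrapper class
    exec_sql = ''
    if overwrite:
        exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
    exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
    return exec_sql

print(as_create_table_sketch('SELECT * FROM logs LIMIT 10', 'tmp_logs', overwrite=True))
# DROP TABLE IF EXISTS tmp_logs;
# CREATE TABLE tmp_logs AS
# SELECT * FROM logs LIMIT 10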
|
Returns the query with the specified limit; does not change the underlying query
def get_query_with_new_limit(self, new_limit):
"""returns the query with the specified limit"""
"""does not change the underlying query"""
if not self._limit:
return self.sql + ' LIMIT ' + str(new_limit)
limit_pos = None
tokens = self._parsed[0].tokens
# Add all items to before_str until there is a limit
for pos, item in enumerate(tokens):
if item.ttype in Keyword and item.value.lower() == 'limit':
limit_pos = pos
break
limit = tokens[limit_pos + 2]
if limit.ttype == sqlparse.tokens.Literal.Number.Integer:
tokens[limit_pos + 2].value = new_limit
elif limit.is_group:
tokens[limit_pos + 2].value = (
'{}, {}'.format(next(limit.get_identifiers()), new_limit)
)
str_res = ''
for i in tokens:
str_res += str(i.value)
return str_res
|
Read a url or post parameter and use it in your SQL Lab query
When in SQL Lab, it's possible to add arbitrary URL "query string"
parameters, and use those in your SQL code. For instance you can
alter your url and add `?foo=bar`, as in
`{domain}/superset/sqllab?foo=bar`. Then if your query is something like
SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at
runtime and replaced by the value in the URL.
As you create a visualization from this SQL Lab query, you can pass
parameters in the explore view as well as from the dashboard, and
it should carry through to your queries.
:param param: the parameter to lookup
:type param: str
:param default: the value to return in the absence of the parameter
:type default: str
def url_param(param, default=None):
"""Read a url or post parameter and use it in your SQL Lab query
When in SQL Lab, it's possible to add arbitrary URL "query string"
parameters, and use those in your SQL code. For instance you can
alter your url and add `?foo=bar`, as in
`{domain}/superset/sqllab?foo=bar`. Then if your query is something like
SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at
runtime and replaced by the value in the URL.
As you create a visualization from this SQL Lab query, you can pass
parameters in the explore view as well as from the dashboard, and
it should carry through to your queries.
:param param: the parameter to lookup
:type param: str
:param default: the value to return in the absence of the parameter
:type default: str
"""
if request.args.get(param):
return request.args.get(param, default)
# Supporting POST as well as get
if request.form.get('form_data'):
form_data = json.loads(request.form.get('form_data'))
url_params = form_data.get('url_params') or {}
return url_params.get(param, default)
return default
|
Gets the values for a particular filter as a list
This is useful if:
- you want to use a filter box to filter a query where the name of filter box
column doesn't match the one in the select statement
- you want to have the ability to filter inside the main query for speed purposes
This searches for "filters" and "extra_filters" in form_data for a match
Usage example:
SELECT action, count(*) as times
FROM logs
WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
GROUP BY 1
:param column: column/filter name to lookup
:type column: str
:param default: default value to return if there's no matching columns
:type default: str
:return: returns a list of filter values
:type: list
def filter_values(column, default=None):
""" Gets a values for a particular filter as a list
This is useful if:
- you want to use a filter box to filter a query where the name of filter box
column doesn't match the one in the select statement
- you want to have the ability to filter inside the main query for speed purposes
This searches for "filters" and "extra_filters" in form_data for a match
Usage example:
SELECT action, count(*) as times
FROM logs
WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
GROUP BY 1
:param column: column/filter name to lookup
:type column: str
:param default: default value to return if there's no matching columns
:type default: str
:return: returns a list of filter values
:type: list
"""
form_data = json.loads(request.form.get('form_data', '{}'))
return_val = []
for filter_type in ['filters', 'extra_filters']:
if filter_type not in form_data:
continue
for f in form_data[filter_type]:
if f['col'] == column:
for v in f['val']:
return_val.append(v)
if return_val:
return return_val
if default:
return [default]
else:
return []
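A standalone sketch of just the lookup step against a hand-built payload; the real helper reads `form_data` from the Flask request, which is omitted here, and the filter columns and values are made up.
form_data = {
    'filters': [{'col': 'action_type', 'op': 'in', 'val': ['login', 'logout']}],
    'extra_filters': [{'col': 'country', 'op': 'in', 'val': ['FR']}],
}

def filter_values_sketch(column, default=None):
    # Same matching logic as above, minus the request handling
    values = [v
              for filter_type in ('filters', 'extra_filters')
              for f in form_data.get(filter_type, [])
              if f['col'] == column
              for v in f['val']]
    return values or ([default] if default else [])

filter_values_sketch('action_type')     # ['login', 'logout']
filter_values_sketch('missing', 'n/a')  # ['n/a']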
|
Processes a sql template
>>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
>>> process_template(sql)
"SELECT '2017-01-01T00:00:00'"
def process_template(self, sql, **kwargs):
"""Processes a sql template
>>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
>>> process_template(sql)
"SELECT '2017-01-01T00:00:00'"
"""
template = self.env.from_string(sql)
kwargs.update(self.context)
return template.render(kwargs)
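A minimal stand-alone rendition of the same idea using jinja2 directly; the real method pulls `env` and the template context (`datetime`, `url_param`, `filter_values`, ...) from the template processor instance.
from datetime import datetime
from jinja2 import Environment

env = Environment()
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
env.from_string(sql).render(datetime=datetime)
# "SELECT '2017-01-01T00:00:00'"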
|
Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directly, now they should come as part of the form_data,
This function allows supporting both without duplicating code
def get_datasource_info(datasource_id, datasource_type, form_data):
"""Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directly, now they should come as part of the form_data,
This function allows supporting both without duplicating code"""
datasource = form_data.get('datasource', '')
if '__' in datasource:
datasource_id, datasource_type = datasource.split('__')
# The case where the datasource has been deleted
datasource_id = None if datasource_id == 'None' else datasource_id
if not datasource_id:
raise Exception(
'The datasource associated with this chart no longer exists')
datasource_id = int(datasource_id)
return datasource_id, datasource_type
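Two illustrative calls showing both the new-style form_data path and the legacy explicit-arguments path; the ids are made up.
get_datasource_info(None, None, {'datasource': '3__table'})  # -> (3, 'table')
get_datasource_info(7, 'druid', {})                          # -> (7, 'druid')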
|
Protecting from has_access failing from missing perms/view
def can_access(self, permission_name, view_name):
"""Protecting from has_access failing from missing perms/view"""
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
|
Creates missing perms for datasources, schemas and metrics
def create_missing_perms(self):
"""Creates missing perms for datasources, schemas and metrics"""
from superset import db
from superset.models import core as models
logging.info(
'Fetching a set of all perms to lookup which ones are missing')
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu, perm):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.merge_perm(view_menu, perm)
logging.info('Creating missing datasource permissions.')
datasources = ConnectorRegistry.get_all_datasources(db.session)
for datasource in datasources:
merge_pv('datasource_access', datasource.get_perm())
merge_pv('schema_access', datasource.schema_perm)
logging.info('Creating missing database permissions.')
databases = db.session.query(models.Database).all()
for database in databases:
merge_pv('database_access', database.perm)
logging.info('Creating missing metrics permissions')
metrics = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(db.session.query(datasource_class.metric_class).all())
for metric in metrics:
if metric.is_restricted:
merge_pv('metric_access', metric.perm)
|
FAB leaves faulty permissions that need to be cleaned up
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
logging.info('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(ab_models.PermissionView)
.filter(or_(
ab_models.PermissionView.permission == None, # NOQA
ab_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logging.info('Deleted {} faulty permissions'.format(deleted_count))
|
Inits the Superset application with security roles and such
def sync_role_definitions(self):
"""Inits the Superset application with security roles and such"""
from superset import conf
logging.info('Syncing role definition')
self.create_custom_permissions()
# Creating default roles
self.set_role('Admin', self.is_admin_pvm)
self.set_role('Alpha', self.is_alpha_pvm)
self.set_role('Gamma', self.is_gamma_pvm)
self.set_role('granter', self.is_granter_pvm)
self.set_role('sql_lab', self.is_sql_lab_pvm)
if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
self.set_role('Public', self.is_gamma_pvm)
self.create_missing_perms()
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
|
Exports the supported import/export schema to a dictionary
def export_schema_to_dict(back_references):
"""Exports the supported import/export schema to a dictionary"""
databases = [Database.export_schema(recursive=True,
include_parent_ref=back_references)]
clusters = [DruidCluster.export_schema(recursive=True,
include_parent_ref=back_references)]
data = dict()
if databases:
data[DATABASES_KEY] = databases
if clusters:
data[DRUID_CLUSTERS_KEY] = clusters
return data
|
Exports databases and druid clusters to a dictionary
def export_to_dict(session,
recursive,
back_references,
include_defaults):
"""Exports databases and druid clusters to a dictionary"""
logging.info('Starting export')
dbs = session.query(Database)
databases = [database.export_to_dict(recursive=recursive,
include_parent_ref=back_references,
include_defaults=include_defaults) for database in dbs]
logging.info('Exported %d %s', len(databases), DATABASES_KEY)
cls = session.query(DruidCluster)
clusters = [cluster.export_to_dict(recursive=recursive,
include_parent_ref=back_references,
include_defaults=include_defaults) for cluster in cls]
logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)
data = dict()
if databases:
data[DATABASES_KEY] = databases
if clusters:
data[DRUID_CLUSTERS_KEY] = clusters
return data
|
Imports databases and druid clusters from dictionary
def import_from_dict(session, data, sync=[]):
"""Imports databases and druid clusters from dictionary"""
if isinstance(data, dict):
logging.info('Importing %d %s',
len(data.get(DATABASES_KEY, [])),
DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logging.info('Importing %d %s',
len(data.get(DRUID_CLUSTERS_KEY, [])),
DRUID_CLUSTERS_KEY)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logging.info('Supplied object is not a dictionary.')
|
Takes a query_obj constructed in the client and returns payload data response
for the given query_obj.
params: query_context: json_blob
def query(self):
"""
Takes a query_obj constructed in the client and returns payload data response
for the given query_obj.
params: query_context: json_blob
"""
query_context = QueryContext(**json.loads(request.form.get('query_context')))
security_manager.assert_datasource_permission(query_context.datasource)
payload_json = query_context.get_payload()
return json.dumps(
payload_json,
default=utils.json_int_dttm_ser,
ignore_nan=True,
)
|
Get the formdata stored in the database for existing slice.
params: slice_id: integer
def query_form_data(self):
"""
Get the formdata stored in the database for existing slice.
params: slice_id: integer
"""
form_data = {}
slice_id = request.args.get('slice_id')
if slice_id:
slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()
if slc:
form_data = slc.form_data.copy()
update_time_range(form_data)
return json.dumps(form_data)
|
Loads 2 css templates to demonstrate the feature
def load_css_templates():
"""Loads 2 css templates to demonstrate the feature"""
print('Creating default CSS templates')
obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first()
if not obj:
obj = CssTemplate(template_name='Flat')
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #FAFAFA;
border: 1px solid #CCC;
box-shadow: none;
border-radius: 0px;
}
.gridster div.widget:hover {
border: 1px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
obj = (
db.session.query(CssTemplate).filter_by(template_name='Courier Black').first())
if not obj:
obj = CssTemplate(template_name='Courier Black')
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #EEE;
border: 2px solid #444;
border-radius: 15px;
box-shadow: none;
}
h2 {
color: white;
font-size: 52px;
}
.navbar {
box-shadow: none;
}
.gridster div.widget:hover {
border: 2px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
.nvd3 text {
font-size: 12px;
font-family: inherit;
}
body{
background: #000;
font-family: Courier, Monaco, monospace;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
|
Get a mapping of foreign name to the local name of foreign keys
def _parent_foreign_key_mappings(cls):
"""Get a mapping of foreign name to the local name of foreign keys"""
parent_rel = cls.__mapper__.relationships.get(cls.export_parent)
if parent_rel:
return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs}
return {}
|
Get all (single column and multi column) unique constraints
def _unique_constrains(cls):
"""Get all (single column and multi column) unique constraints"""
unique = [{c.name for c in u.columns} for u in cls.__table_args__
if isinstance(u, UniqueConstraint)]
unique.extend({c.name} for c in cls.__table__.columns if c.unique)
return unique
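
The return value is a list of column-name sets, one per constraint. A minimal sketch with an invented model covering both the multi-column and the single-column case:

from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class StubDatasource(Base):  # hypothetical model, for illustration only
    __tablename__ = 'stub_datasources'
    __table_args__ = (UniqueConstraint('datasource_name', 'schema'),)
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36), unique=True)
    datasource_name = Column(String(250))
    schema = Column(String(250))

# Same logic as _unique_constrains above, applied to the stub model:
unique = [{c.name for c in u.columns} for u in StubDatasource.__table_args__
          if isinstance(u, UniqueConstraint)]
unique.extend({c.name} for c in StubDatasource.__table__.columns if c.unique)
print(unique)  # e.g. [{'datasource_name', 'schema'}, {'uuid'}]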
|
Export schema as a dictionary
def export_schema(cls, recursive=True, include_parent_ref=False):
"""Export schema as a dictionary"""
parent_excludes = {}
if not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
def formatter(c):
return ('{0} Default ({1})'.format(
str(c.type), c.default.arg) if c.default else str(c.type))
schema = {c.name: formatter(c) for c in cls.__table__.columns
if (c.name in cls.export_fields and
c.name not in parent_excludes)}
if recursive:
for c in cls.export_children:
child_class = cls.__mapper__.relationships[c].argument.class_
schema[c] = [child_class.export_schema(recursive=recursive,
include_parent_ref=include_parent_ref)]
return schema
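
The per-column formatting rule is the interesting part: the type string, plus the declared default when one exists. A standalone sketch applying the same rule to an invented model (children omitted for brevity):

from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class StubTable(Base):  # hypothetical model, for illustration only
    __tablename__ = 'stub_tables'
    export_fields = ('table_name', 'cache_timeout', 'filter_select_enabled')
    id = Column(Integer, primary_key=True)
    table_name = Column(String(250))
    cache_timeout = Column(Integer)
    filter_select_enabled = Column(Boolean, default=False)

def formatter(c):
    return ('{0} Default ({1})'.format(str(c.type), c.default.arg)
            if c.default else str(c.type))

print({c.name: formatter(c) for c in StubTable.__table__.columns
       if c.name in StubTable.export_fields})
# {'table_name': 'VARCHAR(250)', 'cache_timeout': 'INTEGER',
#  'filter_select_enabled': 'BOOLEAN Default (False)'}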
|
Import obj from a dictionary
def import_from_dict(cls, session, dict_rep, parent=None,
recursive=True, sync=[]):
"""Import obj from a dictionary"""
parent_refs = cls._parent_foreign_key_mappings()
export_fields = set(cls.export_fields) | set(parent_refs.keys())
new_children = {c: dict_rep.get(c) for c in cls.export_children
if c in dict_rep}
unique_constrains = cls._unique_constrains()
filters = [] # Using these filters to check if obj already exists
# Remove fields that should not get imported
for k in list(dict_rep):
if k not in export_fields:
del dict_rep[k]
if not parent:
if cls.export_parent:
for p in parent_refs.keys():
if p not in dict_rep:
raise RuntimeError(
'{0}: Missing field {1}'.format(cls.__name__, p))
else:
# Set foreign keys to parent obj
for k, v in parent_refs.items():
dict_rep[k] = getattr(parent, v)
# Add filter for parent obj
filters.extend([getattr(cls, k) == dict_rep.get(k)
for k in parent_refs.keys()])
# Add filter for unique constraints
ucs = [and_(*[getattr(cls, k) == dict_rep.get(k)
for k in cs if dict_rep.get(k) is not None])
for cs in unique_constrains]
filters.append(or_(*ucs))
# Check if object already exists in DB, break if more than one is found
try:
obj_query = session.query(cls).filter(and_(*filters))
obj = obj_query.one_or_none()
except MultipleResultsFound as e:
logging.error('Error importing %s \n %s \n %s', cls.__name__,
str(obj_query),
yaml.safe_dump(dict_rep))
raise e
if not obj:
is_new_obj = True
# Create new DB object
obj = cls(**dict_rep)
logging.info('Importing new %s %s', obj.__tablename__, str(obj))
if cls.export_parent and parent:
setattr(obj, cls.export_parent, parent)
session.add(obj)
else:
is_new_obj = False
logging.info('Updating %s %s', obj.__tablename__, str(obj))
# Update columns
for k, v in dict_rep.items():
setattr(obj, k, v)
# Recursively create children
if recursive:
for c in cls.export_children:
child_class = cls.__mapper__.relationships[c].argument.class_
added = []
for c_obj in new_children.get(c, []):
added.append(child_class.import_from_dict(session=session,
dict_rep=c_obj,
parent=obj,
sync=sync))
# If children should get synced, delete the ones that did not
# get updated.
if c in sync and not is_new_obj:
back_refs = child_class._parent_foreign_key_mappings()
delete_filters = [getattr(child_class, k) ==
getattr(obj, back_refs.get(k))
for k in back_refs.keys()]
to_delete = set(session.query(child_class).filter(
and_(*delete_filters))).difference(set(added))
for o in to_delete:
                    logging.info('Deleting %s %s', c, str(o))
session.delete(o)
return obj
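
A hedged usage sketch: the class name DatasourceModel, the open session, and the dictionary contents below are placeholders for illustration and are not defined in the code above. Passing a relationship name in sync prunes existing children that are absent from the imported dictionary.

dict_rep = {
    'datasource_name': 'sales',
    'columns': [{'column_name': 'region'}, {'column_name': 'amount'}],
    'metrics': [{'metric_name': 'sum__amount'}],
}
obj = DatasourceModel.import_from_dict(  # hypothetical importable model
    session, dict_rep,
    sync=['columns', 'metrics'],  # delete children missing from dict_rep
)
session.commit()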
|
Export obj to dictionary
def export_to_dict(self, recursive=True, include_parent_ref=False,
include_defaults=False):
"""Export obj to dictionary"""
cls = self.__class__
parent_excludes = {}
if recursive and not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
dict_rep = {c.name: getattr(self, c.name)
for c in cls.__table__.columns
if (c.name in self.export_fields and
c.name not in parent_excludes and
(include_defaults or (
getattr(self, c.name) is not None and
(not c.default or
getattr(self, c.name) != c.default.arg))))
}
if recursive:
for c in self.export_children:
# sorting to make lists of children stable
dict_rep[c] = sorted(
[
child.export_to_dict(
recursive=recursive,
include_parent_ref=include_parent_ref,
include_defaults=include_defaults,
) for child in getattr(self, c)
],
key=lambda k: sorted(k.items()))
return dict_rep
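
Together with import_from_dict this gives a serialisation round trip. A hedged sketch where datasource, session, and DatasourceModel are hypothetical placeholders, not defined in the code above:

import yaml

dict_rep = datasource.export_to_dict(recursive=True, include_defaults=False)
serialized = yaml.safe_dump(dict_rep)  # e.g. written to an export file

restored = DatasourceModel.import_from_dict(session, yaml.safe_load(serialized))
session.commit()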
|
Overrides the plain fields of the dashboard.
def override(self, obj):
"""Overrides the plain fields of the dashboard."""
for field in obj.__class__.export_fields:
setattr(self, field, getattr(obj, field))
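
Only the fields listed in export_fields are copied; related objects are left untouched. A standalone sketch using a stub class in place of the real dashboard model:

class StubDashboard:  # illustrative stand-in; the real export_fields differ
    export_fields = ('dashboard_title', 'css')

    def __init__(self, dashboard_title, css):
        self.dashboard_title = dashboard_title
        self.css = css

    def override(self, obj):
        for field in obj.__class__.export_fields:
            setattr(self, field, getattr(obj, field))

existing = StubDashboard('Old title', '')
imported = StubDashboard('New title', '.navbar { opacity: 1; }')
existing.override(imported)
print(existing.dashboard_title)  # 'New title'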
|
Move since and until to time_range.
def update_time_range(form_data):
"""Move since and until to time_range."""
if 'since' in form_data or 'until' in form_data:
form_data['time_range'] = '{} : {}'.format(
form_data.pop('since', '') or '',
form_data.pop('until', '') or '',
)
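
A quick illustration of the rewrite with made-up form data:

form_data = {'viz_type': 'line', 'since': '7 days ago', 'until': ''}
update_time_range(form_data)
print(form_data)
# {'viz_type': 'line', 'time_range': '7 days ago : '}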
|