This is a decorator that creates an async with statement around a function, and makes sure that a _session argument is always passed. Only usable on async functions, of course. The _session argument is (supposed to be) an aiohttp.ClientSession instance in all functions that this decorator has been used on. This is used to make sure that all session objects are properly entered and exited, or that they are passed into a function properly. This adds a session keyword argument to the method signature, and that session will be used as _session if it is not None. def _uses_aiohttp_session(func): """This is a decorator that creates an async with statement around a function, and makes sure that a _session argument is always passed. Only usable on async functions, of course. The _session argument is (supposed to be) an aiohttp.ClientSession instance in all functions that this decorator has been used on. This is used to make sure that all session objects are properly entered and exited, or that they are passed into a function properly. This adds a session keyword argument to the method signature, and that session will be used as _session if it is not None.""" # The function the decorator returns async def decorated_func(*args, session=None, **kwargs): if session is not None: # There is a session passed return await func(*args, _session=session, **kwargs) else: # The session argument wasn't passed, so we create our own async with aiohttp.ClientSession() as new_session: return await func(*args, _session=new_session, **kwargs) # We return the decorated func return decorated_func
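A minimal usage sketch of the decorator above. The coroutine name fetch_json and the URL are hypothetical; aiohttp and asyncio are assumed to be importable alongside the decorator:

import asyncio
import aiohttp

@_uses_aiohttp_session
async def fetch_json(url, *, _session):
    # _session is guaranteed to be an entered aiohttp.ClientSession
    async with _session.get(url) as resp:
        return await resp.json()

async def main():
    # No session passed: the decorator creates and closes one for us
    data = await fetch_json("https://example.com/api")
    # Explicit session passed: reused across several calls
    async with aiohttp.ClientSession() as session:
        data = await fetch_json("https://example.com/api", session=session)

asyncio.get_event_loop().run_until_complete(main())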
Adds the ratelimit and request timeout parameters to a function. def _add_request_parameters(func): """Adds the ratelimit and request timeout parameters to a function.""" # The function the decorator returns async def decorated_func(*args, handle_ratelimit=None, max_tries=None, request_timeout=None, **kwargs): return await func(*args, handle_ratelimit=handle_ratelimit, max_tries=max_tries, request_timeout=request_timeout, **kwargs) # We return the decorated func return decorated_func
Returns the stats for the profiles on the specified regions and platform. For regions without a matching user, the format is the same as for get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies. async def get_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY), platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None): """Returns the stats for the profiles on the specified regions and platform. For regions without a matching user, the format is the same as for get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.""" if platform is None: platform = self.default_platform try: blob_dict = await self._base_request(battletag, "stats", _session, platform=platform, handle_ratelimit=handle_ratelimit, max_tries=max_tries, request_timeout=request_timeout) except ProfileNotFoundError: # The battletag doesn't exist blob_dict = {} existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))} return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "stats"][0] for key, val in existing_regions.items() if key in regions}
Does a request to some endpoint. This is also where ratelimit logic is handled. async def _base_request(self, battle_tag: str, endpoint_name: str, session: aiohttp.ClientSession, *, platform=None, handle_ratelimit=None, max_tries=None, request_timeout=None): """Does a request to some endpoint. This is also where ratelimit logic is handled.""" # We check the different optional arguments, and if they're not passed (are None) we set them to the default for the client object if platform is None: platform = self.default_platform if handle_ratelimit is None: handle_ratelimit = self.default_handle_ratelimit if max_tries is None: max_tries = self.default_max_tries if request_timeout is None: request_timeout = self.default_request_timeout # The battletag with #s removed san_battle_tag = self.sanitize_battletag(battle_tag) # The ratelimit logic for _ in range(max_tries): # We execute a request try: resp_json, status = await self._async_get( session, self.server_url + self._api_urlpath + "{battle_tag}/{endpoint}".format( battle_tag=san_battle_tag, endpoint=endpoint_name ), params={"platform": platform}, # Passed to _async_get and indicates what platform we're searching on headers={"User-Agent": "overwatch_python_api"}, # According to https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18 we have to customise our User-Agent, so we do _async_timeout_seconds=request_timeout ) if status == 429 and resp_json["msg"] == "you are being ratelimited": raise RatelimitError except RatelimitError: # The server reported that we are being ratelimited # We are ratelimited, so we check if we handle ratelimiting logic # If so, we wait and then execute the next iteration of the loop if handle_ratelimit: # We wait to remedy ratelimiting, and we wait a bit more than the response says we should await asyncio.sleep(resp_json["retry"] + 1) continue else: raise else: # We didn't get an error, so we exit the loop because it was a successful request break else: # The loop finished without a break, which means that we got ratelimited until the maximum number of tries was reached raise RatelimitError("Got ratelimited on each request until the maximum number of retries was reached.") # Validate the response if status != 200: if status == 404 and resp_json["msg"] == "profile not found": raise ProfileNotFoundError( "Got HTTP 404, profile not found. This is caused by the given battletag not existing on the specified platform.") if status == 429 and resp_json["msg"] == "you are being ratelimited": raise RatelimitError( "Got HTTP 429, you are being ratelimited. This is caused by calling the API too frequently.") raise ConnectionError("Did not get HTTP status 200, got: {0}".format(status)) return resp_json
Uses aiohttp to make a get request asynchronously. Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds. async def _async_get(self, session: aiohttp.ClientSession, *args, _async_timeout_seconds: int = 5, **kwargs): """Uses aiohttp to make a get request asynchronously. Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds.""" # Taken almost directly from the aiohttp tutorial with async_timeout.timeout(_async_timeout_seconds): async with session.get(*args, **kwargs) as response: return await response.json(), response.status
Check if argument is a method. Optionally, we can also check if minimum or maximum arities (number of accepted arguments) match given minimum and/or maximum. def is_method(arg, min_arity=None, max_arity=None): """Check if argument is a method. Optionally, we can also check if minimum or maximum arities (number of accepted arguments) match given minimum and/or maximum. """ if not callable(arg): return False if not any(is_(arg) for is_ in (inspect.ismethod, inspect.ismethoddescriptor, inspect.isbuiltin)): return False try: argnames, varargs, kwargs, defaults = getargspec(arg) except TypeError: # On CPython 2.x, built-in methods of file aren't inspectable, # so if it's file.read() or file.write(), we can't tell it for sure. # Given how this check is being used, assuming the best is probably # all we can do here. return True else: if argnames and argnames[0] == 'self': argnames = argnames[1:] if min_arity is not None: actual_min_arity = len(argnames) - len(defaults or ()) assert actual_min_arity >= 0, ( "Minimum arity of %r found to be negative (got %s)!" % ( arg, actual_min_arity)) if int(min_arity) != actual_min_arity: return False if max_arity is not None: actual_max_arity = sys.maxsize if varargs or kwargs else len(argnames) if int(max_arity) != actual_max_arity: return False return True
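A short illustration of the arity checks, assuming is_method and its getargspec helper are importable from the module above; the Greeter class is made up for the example:

class Greeter(object):
    def greet(self, name, punctuation="!"):
        return "Hello, " + name + punctuation

g = Greeter()
print(is_method(g.greet))                # True: a bound method
print(is_method(g.greet, min_arity=1))   # True: one required argument besides self
print(is_method(g.greet, max_arity=2))   # True: at most two arguments besides self
print(is_method(g.greet, min_arity=2))   # False: only `name` is required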
Check if the argument is a readable file-like object. def _is_readable(self, obj): """Check if the argument is a readable file-like object.""" try: read = getattr(obj, 'read') except AttributeError: return False else: return is_method(read, max_arity=1)
Check if the argument is a writable file-like object. def _is_writable(self, obj): """Check if the argument is a writable file-like object.""" try: write = getattr(obj, 'write') except AttributeError: return False else: return is_method(write, min_arity=1, max_arity=1)
loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran. def run(time: datetime, altkm: float, glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], *, f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset: """ loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran. """ glat = np.atleast_2d(glat) glon = np.atleast_2d(glon) # has to be here # %% altitude 1-D if glat.size == 1 and glon.size == 1 and isinstance(time, (str, date, datetime, np.datetime64)): atmos = rungtd1d(time, altkm, glat.squeeze()[()], glon.squeeze()[()], f107a=f107a, f107=f107, Ap=Ap) # %% lat/lon grid at 1 altitude else: atmos = loopalt_gtd(time, glat, glon, altkm, f107a=f107a, f107=f107, Ap=Ap) return atmos
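A hedged usage sketch for run(). The date, altitude grid and coordinates are made up, and the call assumes the surrounding msise00-style wiring (gi.getApF107 reachable and the Fortran executable EXE built):

from datetime import datetime
import numpy as np

# 1-D altitude profile at a single location
atmos = run(datetime(2017, 3, 1, 12), altkm=np.arange(100., 1000., 20.),
            glat=65.1, glon=-147.5)
print(atmos['Tn'].squeeze())   # neutral temperature vs. altitude

# lat/lon grid at one altitude
grid = run(datetime(2017, 3, 1, 12), altkm=200.,
           glat=np.array([[60., 65.], [70., 75.]]),
           glon=np.array([[-150., -145.], [-150., -145.]]))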
loop over location and time time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime glat: float or 2-D np.ndarray glon: float or 2-D np.ndarray altkm: float or list or 1-D np.ndarray def loopalt_gtd(time: datetime, glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], altkm: Union[float, List[float], np.ndarray], *, f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset: """ loop over location and time time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime glat: float or 2-D np.ndarray glon: float or 2-D np.ndarray altkm: float or list or 1-D np.ndarray """ glat = np.atleast_2d(glat) glon = np.atleast_2d(glon) assert glat.ndim == glon.ndim == 2 times = np.atleast_1d(time) assert times.ndim == 1 atmos = xarray.Dataset() for k, t in enumerate(times): print('computing', t) for i in range(glat.shape[0]): for j in range(glat.shape[1]): # atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i,j], glon[i,j])), # data_vars='minimal',coords='minimal',dim='lon') atm = rungtd1d(t, altkm, glat[i, j], glon[i, j], f107a=f107a, f107=f107, Ap=Ap) atmos = xarray.merge((atmos, atm)) atmos.attrs = atm.attrs return atmos
This is the "atomic" function looped by other functions def rungtd1d(time: datetime, altkm: np.ndarray, glat: float, glon: float, *, f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset: """ This is the "atomic" function looped by other functions """ time = todatetime(time) # %% get solar parameters for date if f107a and f107a and Ap: pass else: f107Ap = gi.getApF107(time, smoothdays=81) f107a = f107Ap['f107s'].item() f107 = f107Ap['f107'].item() Ap = f107Ap['Ap'].item() # %% dimensions altkm = np.atleast_1d(altkm) assert altkm.ndim == 1 assert isinstance(glon, (int, float)) assert isinstance(glat, (int, float)) # %% iyd = time.strftime('%y%j') altkm = np.atleast_1d(altkm) # %% dens = np.empty((altkm.size, len(species))) temp = np.empty((altkm.size, len(ttypes))) for i, a in enumerate(altkm): cmd = [str(EXE), iyd, str(time.hour), str(time.minute), str(time.second), str(glat), str(glon), str(f107a), str(f107), str(Ap), str(a)] ret = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.DEVNULL) f = io.StringIO(ret) dens[i, :] = np.genfromtxt(f, max_rows=1) temp[i, :] = np.genfromtxt(f, max_rows=1) dsf = {k: (('time', 'alt_km', 'lat', 'lon'), v[None, :, None, None]) for (k, v) in zip(species, dens.T)} dsf.update({'Tn': (('time', 'alt_km', 'lat', 'lon'), temp[:, 1][None, :, None, None]), 'Texo': (('time', 'alt_km', 'lat', 'lon'), temp[:, 0][None, :, None, None])}) atmos = xarray.Dataset(dsf, coords={'time': [time], 'alt_km': altkm, 'lat': [glat], 'lon': [glon], }, attrs={'Ap': Ap, 'f107': f107, 'f107a': f107a, 'species': species}) return atmos
Validate the predicate description. def _validate_desc(self, desc): """Validate the predicate description.""" if desc is None: return desc if not isinstance(desc, STRING_TYPES): raise TypeError( "predicate description for Matching must be a string, " "got %r" % (type(desc),)) # Python 2 mandates __repr__ to be an ASCII string, # so if Unicode is passed (usually due to unicode_literals), # it should be ASCII-encodable. if not IS_PY3 and isinstance(desc, unicode): try: desc = desc.encode('ascii', errors='strict') except UnicodeEncodeError: raise TypeError("predicate description must be " "an ASCII string in Python 2") return desc
Return the placeholder part of matcher's ``__repr__``. def _get_placeholder_repr(self): """Return the placeholder part of matcher's ``__repr__``.""" placeholder = '...' if self.TRANSFORM is not None: placeholder = '%s(%s)' % (self.TRANSFORM.__name__, placeholder) return placeholder
Ensure the matcher class definition is acceptable. :raise RuntimeError: If there is a problem def _validate_class_definition(meta, classname, bases, dict_): """Ensure the matcher class definition is acceptable. :raise RuntimeError: If there is a problem """ # let the BaseMatcher class be created without hassle if meta._is_base_matcher_class_definition(classname, dict_): return # ensure that no important magic methods are being overridden for name, member in dict_.items(): if not (name.startswith('__') and name.endswith('__')): continue # check if it's not a whitelisted magic method name name = name[2:-2] if not name: continue # unlikely case of a ``____`` function if name not in meta._list_magic_methods(BaseMatcher): continue if name in meta.USER_OVERRIDABLE_MAGIC_METHODS: continue # non-function attributes, like __slots__, are harmless if not inspect.isfunction(member): continue # classes in this very module are exempt, since they define # the very behavior of matchers we want to protect if member.__module__ == __name__: continue raise RuntimeError( "matcher class %s cannot override the __%s__ method" % ( classname, name))
Checks whether given class name and dictionary define the :class:`BaseMatcher`. def _is_base_matcher_class_definition(meta, classname, dict_): """Checks whether given class name and dictionary define the :class:`BaseMatcher`. """ if classname != 'BaseMatcher': return False methods = list(filter(inspect.isfunction, dict_.values())) return methods and all(m.__module__ == __name__ for m in methods)
Return names of magic methods defined by a class. :return: Iterable of magic methods, each w/o the ``__`` prefix/suffix def _list_magic_methods(meta, class_): """Return names of magic methods defined by a class. :return: Iterable of magic methods, each w/o the ``__`` prefix/suffix """ return [ name[2:-2] for name, member in class_.__dict__.items() if len(name) > 4 and name.startswith('__') and name.endswith('__') and inspect.isfunction(member) ]
if (!(this instanceof SemVer)) return new SemVer(version, loose); def semver(version, loose): if isinstance(version, SemVer): if version.loose == loose: return version else: version = version.version elif not isinstance(version, str): # xxx: raise InvalidTypeIncluded("must be str, but {!r}".format(version)) """ if (!(this instanceof SemVer)) return new SemVer(version, loose); """ return SemVer(version, loose)
Handler for the event emitted when autodoc processes a docstring. See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring. The TL;DR is that we can modify ``lines`` in-place to influence the output. def autodoc_process_docstring(app, what, name, obj, options, lines): """Handler for the event emitted when autodoc processes a docstring. See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring. The TL;DR is that we can modify ``lines`` in-place to influence the output. """ # check that only symbols that can be directly imported from ``callee`` # package are being documented _, symbol = name.rsplit('.', 1) if symbol not in callee.__all__: raise SphinxError( "autodoc'd '%s' is not a part of the public API!" % name) # for classes exempt from automatic merging of class & __init__ docs, # pretend their __init__ methods have no docstring at all, # so that nothing will be appended to the class's docstring if what == 'class' and name in autoclass_content_exceptions: # amusingly, when autodoc reads the constructor's docstring # for appending it to class docstring, it will report ``what`` # as 'class' (again!); hence we must check what it actually read ctor_docstring_lines = prepare_docstring(obj.__init__.__doc__) if lines == ctor_docstring_lines: lines[:] = []
Raise ValidationError if the contact exists. def clean_email(self): """ Raise ValidationError if the contact exists. """ contacts = self.api.lists.contacts(id=self.list_id)['result'] for contact in contacts: if contact['email'] == self.cleaned_data['email']: raise forms.ValidationError( _(u'This email is already subscribed')) return self.cleaned_data['email']
Create a contact with using the email on the list. def add_contact(self): """ Create a contact with using the email on the list. """ self.api.lists.addcontact( contact=self.cleaned_data['email'], id=self.list_id, method='POST')
Get or create an Api() instance using django settings. def api(self): """ Get or create an Api() instance using django settings. """ api = getattr(self, '_api', None) if api is None: self._api = mailjet.Api() return self._api
Get or create the list id. def list_id(self): """ Get or create the list id. """ list_id = getattr(self, '_list_id', None) if list_id is None: for l in self.api.lists.all()['lists']: if l['name'] == self.list_name: self._list_id = l['id'] if not getattr(self, '_list_id', None): self._list_id = self.api.lists.create( label=self.list_label, name=self.list_name, method='POST')['list_id'] return self._list_id
Portable version of inspect.getargspec(). Necessary because the original is deprecated in Python 3 and removed in later versions. :return: 4-tuple of (argnames, varargname, kwargname, defaults) Note that distinction between positional-or-keyword and keyword-only parameters will be lost, as the original getargspec() doesn't honor it. def getargspec(obj): """Portable version of inspect.getargspec(). Necessary because the original is deprecated in Python 3 and removed in later versions. :return: 4-tuple of (argnames, varargname, kwargname, defaults) Note that distinction between positional-or-keyword and keyword-only parameters will be lost, as the original getargspec() doesn't honor it. """ try: return inspect.getargspec(obj) except AttributeError: pass # we let a TypeError through # translate the signature object back into the 4-tuple argnames = [] varargname, kwargname = None, None defaults = [] for name, param in inspect.signature(obj).parameters.items(): if param.kind == inspect.Parameter.VAR_POSITIONAL: varargname = name elif param.kind == inspect.Parameter.VAR_KEYWORD: kwargname = name else: argnames.append(name) if param.default is not inspect.Parameter.empty: defaults.append(param.default) defaults = defaults or None return argnames, varargname, kwargname, defaults
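A quick illustration of the 4-tuple this portable getargspec() returns. Note the exact type of defaults depends on which branch runs: the original inspect.getargspec (where it still exists) returns a tuple, while the signature-based fallback builds a list:

def example(a, b, c=3, *args, **kwargs):
    pass

argnames, varargname, kwargname, defaults = getargspec(example)
print(argnames)     # ['a', 'b', 'c']
print(varargname)   # 'args'
print(kwargname)    # 'kwargs'
print(defaults)     # (3,) or [3], depending on the branch taken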
Reads values of "magic tags" defined in the given Python file. :param filename: Python filename to read the tags from :return: Dictionary of tags def read_tags(filename): """Reads values of "magic tags" defined in the given Python file. :param filename: Python filename to read the tags from :return: Dictionary of tags """ with open(filename) as f: ast_tree = ast.parse(f.read(), filename) res = {} for node in ast.walk(ast_tree): if type(node) is not ast.Assign: continue target = node.targets[0] if type(target) is not ast.Name: continue if not (target.id.startswith('__') and target.id.endswith('__')): continue name = target.id[2:-2] res[name] = ast.literal_eval(node.value) return res
Normalize any unicode characters to ascii equivalent https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize def normalize_unicode(text): """ Normalize any unicode characters to ascii equivalent https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize """ if isinstance(text, six.text_type): return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8') else: return text
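For instance, accented characters are folded to their closest ASCII equivalents, while non-text objects pass through untouched:

print(normalize_unicode(u'Café au Lait'))   # 'Cafe au Lait'
print(normalize_unicode(b'already bytes'))  # b'already bytes' (returned unchanged)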
Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character. def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True): """ Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character. """ if ngrams is None: ngrams = 1 text = re.sub(re.compile('\'s'), '', text) # Simple heuristic text = re.sub(_re_punctuation, '', text) matched_tokens = re.findall(_re_token, text.lower()) for tokens in get_ngrams(matched_tokens, ngrams): for i in range(len(tokens)): tokens[i] = tokens[i].strip(punctuation) if len(tokens[i]) < min_length or tokens[i] in stopwords: break if ignore_numeric and isnumeric(tokens[i]): break else: yield tuple(tokens)
attempt to build using CMake >= 3 def cmake_setup(): """ attempt to build using CMake >= 3 """ cmake_exe = shutil.which('cmake') if not cmake_exe: raise FileNotFoundError('CMake not available') wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH="CMAKE_SH-NOTFOUND'] if os.name == 'nt' else [] subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)], cwd=BINDIR) ret = subprocess.run([cmake_exe, '--build', str(BINDIR)], stderr=subprocess.PIPE, universal_newlines=True) result(ret)
attempt to build with Meson + Ninja def meson_setup(): """ attempt to build with Meson + Ninja """ meson_exe = shutil.which('meson') ninja_exe = shutil.which('ninja') if not meson_exe or not ninja_exe: raise FileNotFoundError('Meson or Ninja not available') if not (BINDIR / 'build.ninja').is_file(): subprocess.check_call([meson_exe, str(SRCDIR)], cwd=BINDIR) ret = subprocess.run(ninja_exe, cwd=BINDIR, stderr=subprocess.PIPE, universal_newlines=True) result(ret)
Adds an occurrence of the term in the specified document. def add_term_occurrence(self, term, document): """ Adds an occurrence of the term in the specified document. """ if document not in self._documents: self._documents[document] = 0 if term not in self._terms: if self._freeze: return else: self._terms[term] = collections.Counter() if document not in self._terms[term]: self._terms[term][document] = 0 self._documents[document] += 1 self._terms[term][document] += 1
Gets the frequency of the specified term in the entire corpus added to the HashedIndex. def get_total_term_frequency(self, term): """ Gets the frequency of the specified term in the entire corpus added to the HashedIndex. """ if term not in self._terms: raise IndexError(TERM_DOES_NOT_EXIST) return sum(self._terms[term].values())
Returns the frequency of the term specified in the document. def get_term_frequency(self, term, document, normalized=False): """ Returns the frequency of the term specified in the document. """ if document not in self._documents: raise IndexError(DOCUMENT_DOES_NOT_EXIST) if term not in self._terms: raise IndexError(TERM_DOES_NOT_EXIST) result = self._terms[term].get(document, 0) if normalized: result /= self.get_document_length(document) return float(result)
Returns the number of documents the specified term appears in. def get_document_frequency(self, term): """ Returns the number of documents the specified term appears in. """ if term not in self._terms: raise IndexError(TERM_DOES_NOT_EXIST) else: return len(self._terms[term])
Returns the number of terms found within the specified document. def get_document_length(self, document): """ Returns the number of terms found within the specified document. """ if document in self._documents: return self._documents[document] else: raise IndexError(DOCUMENT_DOES_NOT_EXIST)
Returns all documents related to the specified term in the form of a Counter object. def get_documents(self, term): """ Returns all documents related to the specified term in the form of a Counter object. """ if term not in self._terms: raise IndexError(TERM_DOES_NOT_EXIST) else: return self._terms[term]
Returns the Term-Frequency Inverse-Document-Frequency value for the given term in the specified document. If normalized is True, term frequency will be divided by the document length. def get_tfidf(self, term, document, normalized=False): """ Returns the Term-Frequency Inverse-Document-Frequency value for the given term in the specified document. If normalized is True, term frequency will be divided by the document length. """ tf = self.get_term_frequency(term, document) # Speeds up performance by avoiding extra calculations if tf != 0.0: # Add 1 to document frequency to prevent divide by 0 # (Laplacian Correction) df = 1 + self.get_document_frequency(term) n = 2 + len(self._documents) if normalized: tf /= self.get_document_length(document) return tf * math.log10(n / df) else: return 0.0
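A small worked example of the weighting above, with hypothetical counts: suppose the index holds 10 documents, the term occurs 3 times in the document of interest, and it appears in 4 documents overall. The smoothed values are then df = 1 + 4 = 5 and n = 2 + 10 = 12:

import math

tf, df, n = 3, 1 + 4, 2 + 10
print(tf * math.log10(n / df))   # 3 * log10(12 / 5) ≈ 1.14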
Returns a representation of the specified document as a feature vector weighted according to the mode specified (by default tf-idf). A custom weighting function can also be passed which receives the hashedindex instance, the selected term and document as parameters. The result will be returned in the form of a list. This can be converted into a numpy array if required using the `np.asarray` method. Available built-in modes: * tfidf: Term Frequency Inverse Document Frequency * ntfidf: Normalized Term Frequency Inverse Document Frequency * tf: Term Frequency * ntf: Normalized Term Frequency def generate_document_vector(self, doc, mode='tfidf'): """ Returns a representation of the specified document as a feature vector weighted according to the mode specified (by default tf-idf). A custom weighting function can also be passed which receives the hashedindex instance, the selected term and document as parameters. The result will be returned in the form of a list. This can be converted into a numpy array if required using the `np.asarray` method. Available built-in modes: * tfidf: Term Frequency Inverse Document Frequency * ntfidf: Normalized Term Frequency Inverse Document Frequency * tf: Term Frequency * ntf: Normalized Term Frequency """ if mode == 'tfidf': selected_function = HashedIndex.get_tfidf elif mode == 'ntfidf': selected_function = functools.partial(HashedIndex.get_tfidf, normalized=True) elif mode == 'tf': selected_function = HashedIndex.get_term_frequency elif mode == 'ntf': selected_function = functools.partial(HashedIndex.get_term_frequency, normalized=True) elif hasattr(mode, '__call__'): selected_function = mode else: raise ValueError('Unexpected mode: %s' % mode) result = [] for term in self._terms: result.append(selected_function(self, term, doc)) return result
Returns a feature matrix in the form of a list of lists which represents the terms and documents in this Inverted Index using the tf-idf weighting by default. The raw term frequencies in each document can alternatively be used by specifying mode='tf'. A custom weighting function can also be passed which receives the hashedindex instance, a term and a document as parameters. The size of the matrix is equal to m x n where m is the number of documents and n is the number of terms. The list-of-lists format returned by this function can be very easily converted to a numpy matrix if required using the `np.asmatrix` method. def generate_feature_matrix(self, mode='tfidf'): """ Returns a feature matrix in the form of a list of lists which represents the terms and documents in this Inverted Index using the tf-idf weighting by default. The raw term frequencies in each document can alternatively be used by specifying mode='tf'. A custom weighting function can also be passed which receives the hashedindex instance, a term and a document as parameters. The size of the matrix is equal to m x n where m is the number of documents and n is the number of terms. The list-of-lists format returned by this function can be very easily converted to a numpy matrix if required using the `np.asmatrix` method. """ result = [] for doc in self._documents: result.append(self.generate_document_vector(doc, mode)) return result
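A hedged end-to-end sketch, assuming the methods above live on a HashedIndex class with the _documents/_terms attributes they reference (document names and text are made up):

index = HashedIndex()
corpus = {
    'doc1.txt': 'cats chase mice',
    'doc2.txt': 'dogs chase cats',
}
for doc, text in corpus.items():
    for term in text.split():
        index.add_term_occurrence(term, doc)

print(index.get_term_frequency('cats', 'doc2.txt'))   # 1.0
print(index.get_document_frequency('cats'))           # 2
matrix = index.generate_feature_matrix(mode='ntf')    # one row per document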
Returns the first occurrence of an instance of type `klass` in the given list, or None if no such instance is present. def find_class_in_list(klass, lst): """ Returns the first occurrence of an instance of type `klass` in the given list, or None if no such instance is present. """ filtered = list(filter(lambda x: x.__class__ == klass, lst)) if filtered: return filtered[0] return None
Returns a tuple containing an entry corresponding to each of the requested class types, where the entry is either the first object instance of that type or None if no such instances are available. Example Usage: find_classes_in_list( [Address, Response], [<classes.Response...>, <classes.Amount...>, <classes.Address...>]) Produces: (<classes.Address...>, <classes.Response...>) def find_classes_in_list(klasses, lst): """ Returns a tuple containing an entry corresponding to each of the requested class types, where the entry is either the first object instance of that type or None if no such instances are available. Example Usage: find_classes_in_list( [Address, Response], [<classes.Response...>, <classes.Amount...>, <classes.Address...>]) Produces: (<classes.Address...>, <classes.Response...>) """ if not isinstance(klasses, list): klasses = [klasses] return tuple(map(lambda klass: find_class_in_list(klass, lst), klasses))
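The docstring's example, spelled out with stand-in classes:

class Address: pass
class Response: pass
class Amount: pass

items = [Response(), Amount(), Address()]
address, response = find_classes_in_list([Address, Response], items)
print(address, response)   # <Address instance> <Response instance>

# A single class (not wrapped in a list) also works:
(amount,) = find_classes_in_list(Amount, items)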
Converts a dictionary of name and value pairs into a PARMLIST string value acceptable to the Payflow Pro API. def _build_parmlist(self, parameters): """ Converts a dictionary of name and value pairs into a PARMLIST string value acceptable to the Payflow Pro API. """ args = [] for key, value in parameters.items(): if not value is None: # We always use the explicit-length keyname format, to reduce the chance # of requests failing due to unusual characters in parameter values. try: classinfo = unicode except NameError: classinfo = str if isinstance(value, classinfo): key = '%s[%d]' % (key.upper(), len(value.encode('utf-8'))) else: key = '%s[%d]' % (key.upper(), len(str(value))) args.append('%s=%s' % (key, value)) args.sort() parmlist = '&'.join(args) return parmlist
Parses a PARMLIST string into a dictionary of name and value pairs. The parsing is complicated by the following: - parameter keynames may or may not include a length specification - delimiter characters (=, &) may appear inside parameter values, provided the parameter has an explicit length. For example, the following parmlist values are possible: A=B&C=D A[1]=B&C[1]=D A=B&C[1]=D A[3]=B&B&C[1]=D (Here, the value of A is "B&B") A[1]=B&C[3]=D=7 (Here, the value of C is "D=7") def _parse_parmlist(self, parmlist): """ Parses a PARMLIST string into a dictionary of name and value pairs. The parsing is complicated by the following: - parameter keynames may or may not include a length specification - delimiter characters (=, &) may appear inside parameter values, provided the parameter has an explicit length. For example, the following parmlist values are possible: A=B&C=D A[1]=B&C[1]=D A=B&C[1]=D A[3]=B&B&C[1]=D (Here, the value of A is "B&B") A[1]=B&C[3]=D=7 (Here, the value of C is "D=7") """ parmlist = "&" + parmlist name_re = re.compile(r'\&([A-Z0-9_]+)(\[\d+\])?=') results = {} offset = 0 match = name_re.search(parmlist, offset) while match: name, len_suffix = match.groups() offset = match.end() if len_suffix: val_len = int(len_suffix[1:-1]) else: next_match = name_re.search(parmlist, offset) if next_match: val_len = next_match.start() - match.end() else: # At end of parmlist val_len = len(parmlist) - match.end() value = parmlist[match.end() : match.end() + val_len] results[name.lower()] = value match = name_re.search(parmlist, offset) return results
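A quick check of the tricky cases from the docstring, shown as if the helpers were reachable on a client instance (here called client, a hypothetical name):

print(client._parse_parmlist('A[3]=B&B&C[1]=D'))
# {'a': 'B&B', 'c': 'D'}

print(client._parse_parmlist('A[1]=B&C[3]=D=7'))
# {'a': 'B', 'c': 'D=7'}

# Round trip through the builder (lengths are always explicit on output):
print(client._build_parmlist({'user': 'demo', 'amt': 10}))
# 'AMT[2]=10&USER[4]=demo'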
Send an HTTP request to the given *url*, try to decode the reply assuming it's JSON in UTF-8, and return the result :returns: Decoded result, or None in case of an error :rtype: mixed def request(self, url): """ Send an HTTP request to the given *url*, try to decode the reply assuming it's JSON in UTF-8, and return the result :returns: Decoded result, or None in case of an error :rtype: mixed """ self.logger.debug('url:\n' + url) try: response = urlopen(url) return json.loads(response.read().decode('utf-8')) except URLError: self.logger.info('Server connection problem') except Exception: self.logger.info('Server format problem')
Issue a geocoding query for *address* to the Nominatim instance and return the decoded results :param address: a query string with an address or presumed parts of an address :type address: str or (if python2) unicode :param acceptlanguage: rfc2616 language code :type acceptlanguage: str or None :param limit: limit the number of results :type limit: int or None :param countrycodes: restrict the search to countries given by their ISO 3166-1alpha2 codes (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 ) :type countrycodes: str iterable :returns: a list of search results (each a dict) :rtype: list or None def query(self, address, acceptlanguage=None, limit=20, countrycodes=None): """ Issue a geocoding query for *address* to the Nominatim instance and return the decoded results :param address: a query string with an address or presumed parts of an address :type address: str or (if python2) unicode :param acceptlanguage: rfc2616 language code :type acceptlanguage: str or None :param limit: limit the number of results :type limit: int or None :param countrycodes: restrict the search to countries given by their ISO 3166-1alpha2 codes (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 ) :type countrycodes: str iterable :returns: a list of search results (each a dict) :rtype: list or None """ url = self.url + '&q=' + quote_plus(address) if acceptlanguage: url += '&accept-language=' + acceptlanguage if limit: url += '&limit=' + str(limit) if countrycodes: url += '&countrycodes=' + ','.join(countrycodes) return self.request(url)
Issue a reverse geocoding query for a place given by *lat* and *lon*, or by *osm_id* and *osm_type* to the Nominatim instance and return the decoded results :param lat: the geographical latitude of the place :param lon: the geographical longitude of the place :param osm_id: openstreetmap identifier osm_id :type osm_id: str :param osm_type: openstreetmap type osm_type :type osm_type: str :param acceptlanguage: rfc2616 language code :type acceptlanguage: str or None :param zoom: zoom factor from 0 to 18 :type zoom: int or None or a key in :data:`zoom_aliases` :param countrycodes: restrict the search to countries given by their ISO 3166-1alpha2 codes (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 ) :type countrycodes: str iterable :returns: a list of search results (each a dict) :rtype: list or None :raise: NominatimException if invalid zoom value def query(self, lat=None, lon=None, osm_id=None, osm_type=None, acceptlanguage='', zoom=18): """ Issue a reverse geocoding query for a place given by *lat* and *lon*, or by *osm_id* and *osm_type* to the Nominatim instance and return the decoded results :param lat: the geographical latitude of the place :param lon: the geographical longitude of the place :param osm_id: openstreetmap identifier osm_id :type osm_id: str :param osm_type: openstreetmap type osm_type :type osm_type: str :param acceptlanguage: rfc2616 language code :type acceptlanguage: str or None :param zoom: zoom factor from 0 to 18 :type zoom: int or None or a key in :data:`zoom_aliases` :param countrycodes: restrict the search to countries given by their ISO 3166-1alpha2 codes (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 ) :type countrycodes: str iterable :returns: a list of search results (each a dict) :rtype: list or None :raise: NominatimException if invalid zoom value """ url = self.url if osm_id is not None and osm_type not in ('N', 'W', 'R'): raise NominatimException('invalid osm_type') if osm_id is not None and osm_type is not None: url += '&osm_id=' + osm_id + '&osm_type=' + osm_type elif lat is not None and lon is not None: url += '&lat=' + str(lat) + '&lon=' + str(lon) else: return None if acceptlanguage: url += '&accept-language=' + acceptlanguage if zoom in zoom_aliases: zoom = zoom_aliases[zoom] if not isinstance(zoom, int) or zoom < 0 or zoom > 18: raise NominatimException('zoom must effectively be between 0 and 18') url += '&zoom=' + str(zoom) return self.request(url)
Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : string Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.15) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model). def from_model(cls, model_name, **kwargs): """ Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : string Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.15) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model). """ settings = _get_model_info(model_name) model = settings.pop('model_name') for k, v in list(kwargs.items()): if k in ('resolution', 'Psurf'): settings[k] = v return cls(model, **settings)
Set-up a user-defined grid using specifications of a reference grid model. Parameters ---------- model_name : string name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object. def copy_from_model(cls, model_name, reference, **kwargs): """ Set-up a user-defined grid using specifications of a reference grid model. Parameters ---------- model_name : string name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object. """ if isinstance(reference, cls): settings = reference.__dict__.copy() settings.pop('model') else: settings = _get_model_info(reference) settings.pop('model_name') settings.update(kwargs) settings['reference'] = reference return cls(model_name, **settings)
Compute scalars or coordinates associated to the vertical layers. Parameters ---------- grid_spec : CTMGrid object CTMGrid containing the information necessary to re-construct grid levels for a given model coordinate system. Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`Bp(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equals :math:`P_{top}` and :attr:`esig` attribute for pure sigma grids. :math:`Cp(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by; .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details. def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs): """ Compute scalars or coordinates associated to the vertical layers. Parameters ---------- grid_spec : CTMGrid object CTMGrid containing the information necessary to re-construct grid levels for a given model coordinate system. Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`Bp(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equals :math:`P_{top}` and :attr:`esig` attribute for pure sigma grids. :math:`Cp(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by; .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details. 
""" Psurf = np.asarray(Psurf) output_ndims = Psurf.ndim + 1 if output_ndims > 3: raise ValueError("`Psurf` argument must be a float or an array" " with <= 2 dimensions (or None)") # Compute all variables: takes not much memory, fast # and better for code reading SIGe = None SIGc = None ETAe = None ETAc = None if self.hybrid: try: Ap = broadcast_1d_array(self.Ap, output_ndims) Bp = broadcast_1d_array(self.Bp, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (Ap, Bp)") Cp = 0. else: try: Bp = SIGe = broadcast_1d_array(self.esig, output_ndims) SIGc = broadcast_1d_array(self.csig, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (esig, csig)") Ap = Cp = Ptop Pe = Ap + Bp * (Psurf - Cp) Pc = 0.5 * (Pe[0:-1] + Pe[1:]) if self.hybrid: ETAe = (Pe - Ptop)/(Psurf - Ptop) ETAc = (Pc - Ptop)/(Psurf - Ptop) else: SIGe = SIGe * np.ones_like(Psurf) SIGc = SIGc * np.ones_like(Psurf) Ze = prof_altitude(Pe, **kwargs) Zc = prof_altitude(Pc, **kwargs) all_vars = {'eta_edges': ETAe, 'eta_centers': ETAc, 'sigma_edges': SIGe, 'sigma_centers': SIGc, 'pressure_edges': Pe, 'pressure_centers': Pc, 'altitude_edges': Ze, 'altitude_centers': Zc} return all_vars
Calculate longitude-latitude grid for a specified resolution and configuration / ordering. Parameters ---------- rlon, rlat : float Resolution (in degrees) of longitude and latitude grids. halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid should be centered at 180 degrees. def get_lonlat(self): """ Calculate longitude-latitude grid for a specified resolution and configuration / ordering. Parameters ---------- rlon, rlat : float Resolution (in degrees) of longitude and latitude grids. halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid should be centered at 180 degrees. """ rlon, rlat = self.resolution # Compute number of grid cells in each direction Nlon = int(360. / rlon) Nlat = int(180. / rlat) + self.halfpolar # Compute grid cell edges elon = np.arange(Nlon + 1) * rlon - np.array(180.) elon -= rlon / 2. * self.center180 elat = np.arange(Nlat + 1) * rlat - np.array(90.) elat -= rlat / 2. * self.halfpolar elat[0] = -90. elat[-1] = 90. # Compute grid cell centers clon = (elon - (rlon / 2.))[1:] clat = np.arange(Nlat) * rlat - np.array(90.) # Fix grid boundaries if halfpolar if self.halfpolar: clat[0] = (elat[0] + elat[1]) / 2. clat[-1] = -clat[0] else: clat += (elat[1] - elat[0]) / 2. return { "lon_centers": clon, "lat_centers": clat, "lon_edges": elon, "lat_edges": elat }
existing directories where to search for jinja2 templates. The order is important. The first found template from the first found dir wins! def _get_template_dirs(): """existing directories where to search for jinja2 templates. The order is important. The first found template from the first found dir wins!""" return filter(lambda x: os.path.exists(x), [ # user dir os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'), # system wide dir os.path.join('/', 'usr', 'share', 'py2pack', 'templates'), # usually inside the site-packages dir os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), ])
try to get a license from the classifiers def _license_from_classifiers(data): """try to get a license from the classifiers""" classifiers = data.get('classifiers', []) found_license = None for c in classifiers: if c.startswith("License :: OSI Approved :: "): found_license = c.replace("License :: OSI Approved :: ", "") return found_license
try to get an SPDX license def _normalize_license(data): """try to get an SPDX license""" license = data.get('license', None) if not license: # try to get license from classifiers license = _license_from_classifiers(data) if license: if license in SDPX_LICENSES.keys(): data['license'] = SDPX_LICENSES[license] else: data['license'] = "%s (FIXME:No SPDX)" % (license) else: data['license'] = ""
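For instance, feeding metadata that has only classifiers falls back to the classifier-derived name; whether that maps cleanly depends on the SDPX_LICENSES table, which isn't shown here:

data = {'classifiers': ['License :: OSI Approved :: MIT License']}
print(_license_from_classifiers(data))   # 'MIT License'

_normalize_license(data)
# data['license'] is now either the mapped identifier from SDPX_LICENSES
# or 'MIT License (FIXME:No SPDX)' if the table has no entry for it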
Wrap an IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations. def wrap_prompts_class(Klass): """ Wrap an IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations. """ try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree. def get_all_keys(self, start=None): """ A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree. """ s = self.stream if not start: start = HEADER_SIZE + self.block_size * self.root_block s.seek(start) block_type = s.read(2) if block_type == LEAF: reader = LeafReader(self) num_keys = struct.unpack('>i', reader.read(4))[0] for _ in range(num_keys): cur_key = reader.read(self.key_size) # We do a tell/seek here so that the user can read from # the file while this loop is still being run cur_pos = s.tell() yield cur_key s.seek(cur_pos) length = sbon.read_varint(reader) reader.seek(length, 1) elif block_type == INDEX: (_, num_keys, first_child) = struct.unpack('>Bii', s.read(9)) children = [first_child] for _ in range(num_keys): # Skip the key field. _ = s.read(self.key_size) # Read pointer to the child block. next_child = struct.unpack('>i', s.read(4))[0] children.append(next_child) for child_loc in children: for key in self.get_all_keys(HEADER_SIZE + self.block_size * child_loc): yield key elif block_type == FREE: pass else: raise Exception('Unhandled block type: {}'.format(block_type))
Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi' def _replace_star(fmt, size): """ Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi' """ n_stars = fmt.count('*') if n_stars > 1: raise ValueError("More than one `*` in format (%s)." % fmt) if n_stars: i = fmt.find('*') s = struct.calcsize(fmt.replace(fmt[i:i + 2], '')) n = old_div((size - s), struct.calcsize(fmt[i + 1])) fmt = fmt.replace('*', str(n)) return fmt
Read pre- or suffix of line at current position with given format `fmt` (default 'i'). def _fix(self, fmt='i'): """ Read pre- or suffix of line at current position with given format `fmt` (default 'i'). """ fmt = self.endian + fmt fix = self.read(struct.calcsize(fmt)) if fix: return struct.unpack(fmt, fix)[0] else: raise EOFError
Return next unformatted "line". If format is given, unpack content, otherwise return byte string. def readline(self, fmt=None): """ Return next unformatted "line". If format is given, unpack content, otherwise return byte string. """ prefix_size = self._fix() if fmt is None: content = self.read(prefix_size) else: fmt = self.endian + fmt fmt = _replace_star(fmt, prefix_size) content = struct.unpack(fmt, self.read(prefix_size)) try: suffix_size = self._fix() except EOFError: # when endian is invalid and prefix_size > total file size suffix_size = -1 if prefix_size != suffix_size: raise IOError(_FIX_ERROR) return content
Skip the next line and returns position and size of line. Raises IOError if pre- and suffix of line do not match. def skipline(self): """ Skip the next line and returns position and size of line. Raises IOError if pre- and suffix of line do not match. """ position = self.tell() prefix = self._fix() self.seek(prefix, 1) # skip content suffix = self._fix() if prefix != suffix: raise IOError(_FIX_ERROR) return position, prefix
Write `line` (list of objects) with given `fmt` to file. The `line` will be chained if object is iterable (except for basestrings). def writeline(self, fmt, *args): """ Write `line` (list of objects) with given `fmt` to file. The `line` will be chained if object is iterable (except for basestrings). """ fmt = self.endian + fmt size = struct.calcsize(fmt) fix = struct.pack(self.endian + 'i', size) line = struct.pack(fmt, *args) self.write(fix) self.write(line) self.write(fix)
Write `lines` with given `fmt`. def writelines(self, lines, fmt): """ Write `lines` with given `fmt`. """ if isinstance(fmt, basestring): fmt = [fmt] * len(lines) for f, line in zip(fmt, lines): self.writeline(f, line)
Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number. def read_varint(stream): """Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number. """ value = 0 while True: byte = ord(stream.read(1)) if not byte & 0b10000000: return value << 7 | byte value = value << 7 | (byte & 0b01111111)
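A self-contained check of the varint decoder using an in-memory stream. 300 is 0b10_0101100, so it encodes as the high 7-bit group with the continuation bit set, followed by the low 7-bit group with the bit clear:

import io

stream = io.BytesIO(bytes([0b10000010, 0b00101100]))
print(read_varint(stream))   # 300

# single-byte values come back directly
print(read_varint(io.BytesIO(b'\x05')))   # 5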
Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata default_dtype : numpy.dtype, optional Default datatype for variables encoded in file on disk (single-precision float by default). memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk def open_bpchdataset(filename, fields=[], categories=[], tracerinfo_file='tracerinfo.dat', diaginfo_file='diaginfo.dat', endian=">", decode_cf=True, memmap=True, dask=True, return_store=False): """ Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata default_dtype : numpy.dtype, optional Default datatype for variables encoded in file on disk (single-precision float by default). memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. 
store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk """ store = BPCHDataStore( filename, fields=fields, categories=categories, tracerinfo_file=tracerinfo_file, diaginfo_file=diaginfo_file, endian=endian, use_mmap=memmap, dask_delayed=dask ) ds = xr.Dataset.load_store(store) # Record what the file object underlying the store which we culled this # Dataset from is so that we can clean it up later ds._file_obj = store._bpch # Handle CF corrections if decode_cf: decoded_vars = OrderedDict() rename_dict = {} for v in ds.variables: cf_name = cf.get_valid_varname(v) rename_dict[v] = cf_name new_var = cf.enforce_cf_variable(ds[v]) decoded_vars[cf_name] = new_var ds = xr.Dataset(decoded_vars, attrs=ds.attrs.copy()) # ds.rename(rename_dict, inplace=True) # TODO: There's a bug with xr.decode_cf which eagerly loads data. # Re-enable this once that bug is fixed # Note that we do not need to decode the times because we explicitly # kept track of them as we parsed the data. # ds = xr.decode_cf(ds, decode_times=False) # Set attributes for CF conventions ts = get_timestamp() ds.attrs.update(dict( Conventions='CF1.6', source=filename, tracerinfo=tracerinfo_file, diaginfo=diaginfo_file, filetype=store._bpch.filetype, filetitle=store._bpch.filetitle, history=( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, filename) ), )) # To immediately load the data from the BPCHDataProxy paylods, need # to execute ds.data_vars for some reason... if return_store: return ds, store else: return ds
Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`. def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts', preprocess=None, lock=None, **kwargs): """ Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. 
The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`. """ from xarray.backends.api import _MultiFileCloser # TODO: Include file locks? # Check for dask dask = kwargs.pop('dask', False) if not dask: raise ValueError("Reading multiple files without dask is not supported") kwargs['dask'] = True # Add the dask flag back to the kwargs passed on to open_bpchdataset if isinstance(paths, basestring): paths = sorted(glob(paths)) if not paths: raise IOError("No paths to files were passed into open_mfbpchdataset") datasets = [open_bpchdataset(filename, **kwargs) for filename in paths] bpch_objs = [ds._file_obj for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] # Concatenate over time combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim) combined._file_obj = _MultiFileCloser(bpch_objs) combined.attrs = datasets[0].attrs ts = get_timestamp() fns_str = " ".join(paths) combined.attrs['history'] = ( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, fns_str) ) return combined
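A sketch of the multi-file path, again with placeholder file names and assuming the function is importable from the xbpch package; note that dask=True is mandatory here, unlike in open_bpchdataset.

from glob import glob
import xbpch

# Hypothetical monthly output files -- adjust the glob to your own naming scheme.
paths = sorted(glob("trac_avg.geosfp_4x5_standard.2016*"))
ds = xbpch.open_mfbpchdataset(
    paths,
    dask=True,                          # required; reading without dask raises ValueError
    tracerinfo_file="tracerinfo.dat",
    diaginfo_file="diaginfo.dat",
)
annual_mean = ds.mean("time")           # files were concatenated along "time"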
Write your forwards methods here. def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for entry in orm['multilingual_news.NewsEntry'].objects.all(): self.migrate_placeholder( orm, entry, 'excerpt', 'multilingual_news_excerpt', 'excerpt') self.migrate_placeholder( orm, entry, 'content', 'multilingual_news_content', 'content')
Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html def image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ if preserve_aspect_ratio is None: if width != 'auto' and height != 'auto': preserve_aspect_ratio = False else: preserve_aspect_ratio = True data = { 'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'), 'inline': inline, 'size': len(b), 'base64_img': base64.b64encode(b).decode('ascii'), 'width': width, 'height': height, 'preserve_aspect_ratio': int(preserve_aspect_ratio), } # IMAGE_CODE is a string because bytes doesn't support formatting return IMAGE_CODE.format(**data).encode('ascii')
Display the image given by the bytes b in the terminal. If filename=None the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html def display_image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Display the image given by the bytes b in the terminal. If filename=None the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio)) sys.stdout.write('\n')
Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html def display_image_file(fn, width='auto', height='auto', preserve_aspect_ratio=None): """ Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f: sys.stdout.buffer.write(image_bytes(f.read(), filename=fn, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio))
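A usage sketch for the three helpers above, assuming they are in scope (imported from the module that defines them) and that the image path is a placeholder; the escape sequences are only honoured inside iTerm2.

import os

# Hypothetical image path -- any PNG/JPEG readable by iTerm2 will do.
path = "~/Pictures/example.png"

display_image_file(path, width="40")            # 40 character cells wide, auto height

with open(os.path.expanduser(path), "rb") as f:
    raw = f.read()
display_image_bytes(raw, filename="example.png", height="50%")

# image_bytes() only builds the escape sequence, so it can be captured or piped:
payload = image_bytes(raw, width="auto", height="auto")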
Get requirements from pip requirement files. def get_requirements(*args): """Get requirements from pip requirement files.""" requirements = set() contents = get_contents(*args) for line in contents.splitlines(): # Strip comments. line = re.sub(r'^#.*|\s#.*', '', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\s+', '', line)) return sorted(requirements)
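A standalone illustration of the same comment-stripping and whitespace rules, run against an inline string rather than files read through get_contents():

import re

sample = """
# build requirements
requests >= 2.0   # inline comment
six

coloredlogs==10.0
"""

requirements = set()
for line in sample.splitlines():
    line = re.sub(r'^#.*|\s#.*', '', line)         # drop full-line and trailing comments
    if line and not line.isspace():                # ignore blank lines
        requirements.add(re.sub(r'\s+', '', line)) # normalise away internal whitespace

print(sorted(requirements))   # ['coloredlogs==10.0', 'requests>=2.0', 'six']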
Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc def get_holidays(self, division=None, year=None): """ Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc """ if division: holidays = self.data[division] else: holidays = self.data[self.ENGLAND_AND_WALES] dates_in_common = six.moves.reduce( set.intersection, (set(map(lambda holiday: holiday['date'], division_holidays)) for division, division_holidays in six.iteritems(self.data)) ) holidays = filter(lambda holiday: holiday['date'] in dates_in_common, holidays) if year: holidays = filter(lambda holiday: holiday['date'].year == year, holidays) return list(holidays)
Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict def get_next_holiday(self, division=None, date=None): """ Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict """ date = date or datetime.date.today() for holiday in self.get_holidays(division=division): if holiday['date'] > date: return holiday
True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool def is_holiday(self, date, division=None): """ True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date in (holiday['date'] for holiday in self.get_holidays(division=division))
Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict def get_next_work_day(self, division=None, date=None): """ Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict """ date = date or datetime.date.today() one_day = datetime.timedelta(days=1) holidays = set(holiday['date'] for holiday in self.get_holidays(division=division)) while True: date += one_day if date.weekday() not in self.weekend and date not in holidays: return date
True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool def is_work_day(self, date, division=None): """ True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date.weekday() not in self.weekend and date not in ( holiday['date'] for holiday in self.get_holidays(division=division) )
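A usage sketch for the holiday helpers above; the BankHolidays class name, its no-argument constructor, and the SCOTLAND constant are assumptions for illustration, as is the 'title' key on the returned dicts.

import datetime

holidays = BankHolidays()                                   # hypothetical constructor
next_one = holidays.get_next_holiday(division=BankHolidays.SCOTLAND)
print(next_one['title'], next_one['date'])

# Skip weekends and bank holidays when computing a follow-up date:
follow_up = holidays.get_next_work_day(date=datetime.date(2024, 12, 24))

print(holidays.is_work_day(datetime.date(2024, 12, 25)))    # expected False (Christmas Day)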
Generator which yields a set of (rx, ry) tuples which describe all regions for which the world has tile data def get_all_regions_with_tiles(self): """ Generator which yields a set of (rx, ry) tuples which describe all regions for which the world has tile data """ for key in self.get_all_keys(): (layer, rx, ry) = struct.unpack('>BHH', key) if layer == 1: yield (rx, ry)
Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found. def get_entity_uuid_coords(self, uuid): """ Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found. """ if uuid in self._entity_to_region_map: coords = self._entity_to_region_map[uuid] entities = self.get_entities(*coords) for entity in entities: if 'uniqueId' in entity.data and entity.data['uniqueId'] == uuid: return tuple(entity.data['tilePosition']) return None
A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world. def _entity_to_region_map(self): """ A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world. """ entity_to_region = {} for key in self.get_all_keys(): layer, rx, ry = struct.unpack('>BHH', key) if layer != 4: continue stream = io.BytesIO(self.get(layer, rx, ry)) num_entities = sbon.read_varint(stream) for _ in range(num_entities): uuid = sbon.read_string(stream) if uuid in entity_to_region: raise ValueError('Duplicate UUID {}'.format(uuid)) entity_to_region[uuid] = (rx, ry) return entity_to_region
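The database keys decoded above are just packed (layer, rx, ry) triples; a small standalone sketch of that encoding, using the layer numbers the code relies on (1 for tile data, 4 for entity data):

import struct

key = struct.pack('>BHH', 4, 12, 34)          # layer 4 (entities), region (12, 34)
layer, rx, ry = struct.unpack('>BHH', key)
assert (layer, rx, ry) == (4, 12, 34)

# Selecting only tile regions, as get_all_regions_with_tiles() does:
keys = [struct.pack('>BHH', 1, 0, 0), struct.pack('>BHH', 4, 0, 0)]
tile_regions = [struct.unpack('>BHH', k)[1:] for k in keys
                if struct.unpack('>BHH', k)[0] == 1]
print(tile_regions)                           # [(0, 0)]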
Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression. def create_fuzzy_pattern(pattern): """ Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression. """ return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE)
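A quick demonstration of what the helper builds and how it matches (create_fuzzy_pattern is copied from above so the snippet runs on its own):

import re

def create_fuzzy_pattern(pattern):
    return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE)

p = create_fuzzy_pattern("gml")
print(p.pattern)                       # g.*m.*l
print(bool(p.search("Google/mail")))   # True: 'g', 'm', 'l' appear in this order
print(bool(p.search("Mozilla")))       # False: there is no 'g' before the 'm'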
A list of :class:`PasswordEntry` objects that don't match the exclude list. def filtered_entries(self): """A list of :class:`PasswordEntry` objects that don't match the exclude list.""" return [ e for e in self.entries if not any(fnmatch.fnmatch(e.name.lower(), p.lower()) for p in self.exclude_list) ]
Perform a "fuzzy" search that matches the given characters in the given order. :param filters: The pattern(s) to search for. :returns: The matched password names (a list of strings). def fuzzy_search(self, *filters): """ Perform a "fuzzy" search that matches the given characters in the given order. :param filters: The pattern(s) to search for. :returns: The matched password names (a list of strings). """ matches = [] logger.verbose( "Performing fuzzy search on %s (%s) ..", pluralize(len(filters), "pattern"), concatenate(map(repr, filters)) ) patterns = list(map(create_fuzzy_pattern, filters)) for entry in self.filtered_entries: if all(p.search(entry.name) for p in patterns): matches.append(entry) logger.log( logging.INFO if matches else logging.VERBOSE, "Matched %s using fuzzy search.", pluralize(len(matches), "password"), ) return matches
Select a password from the available choices. :param arguments: Refer to :func:`smart_search()`. :returns: The name of a password (a string) or :data:`None` (when no password matched the given `arguments`). def select_entry(self, *arguments): """ Select a password from the available choices. :param arguments: Refer to :func:`smart_search()`. :returns: The name of a password (a string) or :data:`None` (when no password matched the given `arguments`). """ matches = self.smart_search(*arguments) if len(matches) > 1: logger.info("More than one match, prompting for choice ..") labels = [entry.name for entry in matches] return matches[labels.index(prompt_for_choice(labels))] else: logger.info("Matched one entry: %s", matches[0].name) return matches[0]
Perform a simple search for case insensitive substring matches. :param keywords: The string(s) to search for. :returns: The matched password names (a list of strings). Only passwords whose names match *all* of the given keywords are returned. def simple_search(self, *keywords): """ Perform a simple search for case insensitive substring matches. :param keywords: The string(s) to search for. :returns: The matched password names (a list of strings). Only passwords whose names match *all* of the given keywords are returned. """ matches = [] keywords = [kw.lower() for kw in keywords] logger.verbose( "Performing simple search on %s (%s) ..", pluralize(len(keywords), "keyword"), concatenate(map(repr, keywords)), ) for entry in self.filtered_entries: normalized = entry.name.lower() if all(kw in normalized for kw in keywords): matches.append(entry) logger.log( logging.INFO if matches else logging.VERBOSE, "Matched %s using simple search.", pluralize(len(matches), "password"), ) return matches
Perform a smart search on the given keywords or patterns. :param arguments: The keywords or patterns to search for. :returns: The matched password names (a list of strings). :raises: The following exceptions can be raised: - :exc:`.NoMatchingPasswordError` when no matching passwords are found. - :exc:`.EmptyPasswordStoreError` when the password store is empty. This method first tries :func:`simple_search()` and if that doesn't produce any matches it will fall back to :func:`fuzzy_search()`. If no matches are found an exception is raised (see above). def smart_search(self, *arguments): """ Perform a smart search on the given keywords or patterns. :param arguments: The keywords or patterns to search for. :returns: The matched password names (a list of strings). :raises: The following exceptions can be raised: - :exc:`.NoMatchingPasswordError` when no matching passwords are found. - :exc:`.EmptyPasswordStoreError` when the password store is empty. This method first tries :func:`simple_search()` and if that doesn't produce any matches it will fall back to :func:`fuzzy_search()`. If no matches are found an exception is raised (see above). """ matches = self.simple_search(*arguments) if not matches: logger.verbose("Falling back from substring search to fuzzy search ..") matches = self.fuzzy_search(*arguments) if not matches: if len(self.filtered_entries) > 0: raise NoMatchingPasswordError( format("No passwords matched the given arguments! (%s)", concatenate(map(repr, arguments))) ) else: msg = "You don't have any passwords yet! (no *.gpg files found)" raise EmptyPasswordStoreError(msg) return matches
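A stripped-down sketch of the substring-then-fuzzy fallback, operating on plain name strings instead of PasswordEntry objects so it can run on its own:

import re

def create_fuzzy_pattern(pattern):
    return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE)

names = ["Personal/GitHub", "Work/GitLab", "Personal/Bank"]

def simple_search(names, *keywords):
    keywords = [kw.lower() for kw in keywords]
    return [n for n in names if all(kw in n.lower() for kw in keywords)]

def smart_search(names, *arguments):
    matches = simple_search(names, *arguments)
    if not matches:                                   # fall back to fuzzy matching
        patterns = [create_fuzzy_pattern(a) for a in arguments]
        matches = [n for n in names if all(p.search(n) for p in patterns)]
    return matches

print(smart_search(names, "github"))   # ['Personal/GitHub'] via the substring pass
print(smart_search(names, "wgl"))      # ['Work/GitLab'] via the fuzzy fallback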
A list of :class:`PasswordEntry` objects. def entries(self): """A list of :class:`PasswordEntry` objects.""" passwords = [] for store in self.stores: passwords.extend(store.entries) return natsort(passwords, key=lambda e: e.name)
An execution context created using :mod:`executor.contexts`. The value of :attr:`context` defaults to a :class:`~executor.contexts.LocalContext` object with the following characteristics: - The working directory of the execution context is set to the value of :attr:`directory`. - The environment variable given by :data:`DIRECTORY_VARIABLE` is set to the value of :attr:`directory`. :raises: :exc:`.MissingPasswordStoreError` when :attr:`directory` doesn't exist. def context(self): """ An execution context created using :mod:`executor.contexts`. The value of :attr:`context` defaults to a :class:`~executor.contexts.LocalContext` object with the following characteristics: - The working directory of the execution context is set to the value of :attr:`directory`. - The environment variable given by :data:`DIRECTORY_VARIABLE` is set to the value of :attr:`directory`. :raises: :exc:`.MissingPasswordStoreError` when :attr:`directory` doesn't exist. """ # Make sure the directory exists. self.ensure_directory_exists() # Prepare the environment variables. environment = {DIRECTORY_VARIABLE: self.directory} try: # Try to enable the GPG agent in headless sessions. environment.update(get_gpg_variables()) except Exception: # If we failed then let's at least make sure that the # $GPG_TTY environment variable is set correctly. environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True)) return LocalContext(directory=self.directory, environment=environment)
Normalize the value of :attr:`directory` when it's set. def directory(self, value): """Normalize the value of :attr:`directory` when it's set.""" # Normalize the value of `directory'. set_property(self, "directory", parse_path(value)) # Clear the computed values of `context' and `entries'. clear_property(self, "context") clear_property(self, "entries")
A list of :class:`PasswordEntry` objects. def entries(self): """A list of :class:`PasswordEntry` objects.""" timer = Timer() passwords = [] logger.info("Scanning %s ..", format_path(self.directory)) listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0") for filename in split(listing, "\0"): basename, extension = os.path.splitext(filename) if extension == ".gpg": # We use os.path.normpath() to remove the leading `./' prefixes # that `find' adds because it searches the working directory. passwords.append(PasswordEntry(name=os.path.normpath(basename), store=self)) logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer) return natsort(passwords, key=lambda e: e.name)
Make sure :attr:`directory` exists. :raises: :exc:`.MissingPasswordStoreError` when the password storage directory doesn't exist. def ensure_directory_exists(self): """ Make sure :attr:`directory` exists. :raises: :exc:`.MissingPasswordStoreError` when the password storage directory doesn't exist. """ if not os.path.isdir(self.directory): msg = "The password storage directory doesn't exist! (%s)" raise MissingPasswordStoreError(msg % self.directory)
Format :attr:`text` for viewing on a terminal. :param include_password: :data:`True` to include the password in the formatted text, :data:`False` to exclude the password from the formatted text. :param use_colors: :data:`True` to use ANSI escape sequences, :data:`False` otherwise. When this is :data:`None` :func:`~humanfriendly.terminal.terminal_supports_colors()` will be used to detect whether ANSI escape sequences are supported. :param padding: :data:`True` to add empty lines before and after the entry and indent the entry's text with two spaces, :data:`False` to skip the padding. :param filters: An iterable of regular expression patterns (defaults to an empty tuple). If a line in the entry's text matches one of these patterns it won't be shown on the terminal. :returns: The formatted entry (a string). def format_text(self, include_password=True, use_colors=None, padding=True, filters=()): """ Format :attr:`text` for viewing on a terminal. :param include_password: :data:`True` to include the password in the formatted text, :data:`False` to exclude the password from the formatted text. :param use_colors: :data:`True` to use ANSI escape sequences, :data:`False` otherwise. When this is :data:`None` :func:`~humanfriendly.terminal.terminal_supports_colors()` will be used to detect whether ANSI escape sequences are supported. :param padding: :data:`True` to add empty lines before and after the entry and indent the entry's text with two spaces, :data:`False` to skip the padding. :param filters: An iterable of regular expression patterns (defaults to an empty tuple). If a line in the entry's text matches one of these patterns it won't be shown on the terminal. :returns: The formatted entry (a string). """ # Determine whether we can use ANSI escape sequences. if use_colors is None: use_colors = terminal_supports_colors() # Extract the password (first line) from the entry. lines = self.text.splitlines() password = lines.pop(0).strip() # Compile the given patterns to case insensitive regular expressions # and use them to ignore lines that match any of the given filters. patterns = [coerce_pattern(f, re.IGNORECASE) for f in filters] lines = [l for l in lines if not any(p.search(l) for p in patterns)] text = trim_empty_lines("\n".join(lines)) # Include the password in the formatted text? if include_password: text = "Password: %s\n%s" % (password, text) # Add the name to the entry (only when there's something to show). if text and not text.isspace(): title = " / ".join(split(self.name, "/")) if use_colors: title = ansi_wrap(title, bold=True) text = "%s\n\n%s" % (title, text) # Highlight the entry's text using ANSI escape sequences. lines = [] for line in text.splitlines(): # Check for a "Key: Value" line. match = KEY_VALUE_PATTERN.match(line) if match: key = "%s:" % match.group(1).strip() value = match.group(2).strip() if use_colors: # Highlight the key. key = ansi_wrap(key, color=HIGHLIGHT_COLOR) # Underline hyperlinks in the value. tokens = value.split() for i in range(len(tokens)): if "://" in tokens[i]: tokens[i] = ansi_wrap(tokens[i], underline=True) # Replace the line with a highlighted version. line = key + " " + " ".join(tokens) if padding: line = " " + line lines.append(line) text = "\n".join(lines) text = trim_empty_lines(text) if text and padding: text = "\n%s\n" % text return text
Read an output's diaginfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- diaginfo_file : str Path to diaginfo.dat Returns ------- diag_df : pandas.DataFrame DataFrame containing the category information. diag_desc : dict Mapping of the DataFrame's column names to their descriptions. def get_diaginfo(diaginfo_file): """ Read an output's diaginfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- diaginfo_file : str Path to diaginfo.dat Returns ------- diag_df : pandas.DataFrame DataFrame containing the category information. diag_desc : dict Mapping of the DataFrame's column names to their descriptions. """ widths = [rec.width for rec in diag_recs] col_names = [rec.name for rec in diag_recs] dtypes = [rec.type for rec in diag_recs] usecols = [name for name in col_names if not name.startswith('-')] diag_df = pd.read_fwf(diaginfo_file, widths=widths, names=col_names, dtypes=dtypes, comment="#", header=None, usecols=usecols) diag_desc = {diag.name: diag.desc for diag in diag_recs if not diag.name.startswith('-')} return diag_df, diag_desc
Read an output's tracerinfo.dat file and parse into a DataFrame for use in selecting and parsing tracers. Parameters ---------- tracerinfo_file : str Path to tracerinfo.dat Returns ------- tracer_df : pandas.DataFrame DataFrame containing the tracer information. tracer_desc : dict Mapping of the DataFrame's column names to their descriptions. def get_tracerinfo(tracerinfo_file): """ Read an output's tracerinfo.dat file and parse into a DataFrame for use in selecting and parsing tracers. Parameters ---------- tracerinfo_file : str Path to tracerinfo.dat Returns ------- tracer_df : pandas.DataFrame DataFrame containing the tracer information. tracer_desc : dict Mapping of the DataFrame's column names to their descriptions. """ widths = [rec.width for rec in tracer_recs] col_names = [rec.name for rec in tracer_recs] dtypes = [rec.type for rec in tracer_recs] usecols = [name for name in col_names if not name.startswith('-')] tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names, dtypes=dtypes, comment="#", header=None, usecols=usecols) # Check an edge case related to a bug in GEOS-Chem v12.0.3 which # erroneously dropped short/long tracer names in certain tracerinfo.dat outputs. # What we do here is figure out which rows were erroneously processed (they'll # have NaNs in them) and raise a warning if there are any na_free = tracer_df.dropna(subset=['tracer', 'scale']) only_na = tracer_df[~tracer_df.index.isin(na_free.index)] if len(only_na) > 0: warn("At least one row in {} wasn't decoded correctly; we strongly" " recommend you manually check that file to see that all" " tracers are properly recorded." .format(tracerinfo_file)) tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs if not tracer.name.startswith('-')} # Process some of the information about which variables are hydrocarbons # and chemical tracers versus other diagnostics. def _assign_hydrocarbon(row): if row['C'] != 1: row['hydrocarbon'] = True row['molwt'] = C_MOLECULAR_WEIGHT else: row['hydrocarbon'] = False return row tracer_df = ( tracer_df .apply(_assign_hydrocarbon, axis=1) .assign(chemical=lambda x: x['molwt'].astype(bool)) ) return tracer_df, tracer_desc
Read a chunk of data from a bpch output file. Parameters ---------- filename : str Path to file on disk containing the data file_position : int Position (bytes) where desired data chunk begins shape : tuple of ints Resultant (n-dimensional) shape of requested data; the chunk will be read sequentially from disk and then re-shaped dtype : dtype Dtype of data; for best results, pass a dtype which includes an endian indicator, e.g. `dtype = np.dtype('>f4')` endian : str Endianness of data; should be consistent with `dtype` use_mmap : bool Memory map the chunk of data to the file on disk, else read immediately Returns ------- Array with shape `shape` and dtype `dtype` containing the requested chunk of data from `filename`. def read_from_bpch(filename, file_position, shape, dtype, endian, use_mmap=False): """ Read a chunk of data from a bpch output file. Parameters ---------- filename : str Path to file on disk containing the data file_position : int Position (bytes) where desired data chunk begins shape : tuple of ints Resultant (n-dimensional) shape of requested data; the chunk will be read sequentially from disk and then re-shaped dtype : dtype Dtype of data; for best results, pass a dtype which includes an endian indicator, e.g. `dtype = np.dtype('>f4')` endian : str Endianness of data; should be consistent with `dtype` use_mmap : bool Memory map the chunk of data to the file on disk, else read immediately Returns ------- Array with shape `shape` and dtype `dtype` containing the requested chunk of data from `filename`. """ offset = file_position + 4 if use_mmap: d = np.memmap(filename, dtype=dtype, mode='r', shape=shape, offset=offset, order='F') else: with FortranFile(filename, 'rb', endian) as ff: ff.seek(file_position) d = np.array(ff.readline('*f')) d = d.reshape(shape, order='F') # As a sanity check, *be sure* that the resulting data block has the # correct shape, and fail early if it doesn't. if (d.shape != shape): raise IOError("Data chunk read from {} does not have the right shape," " (expected {} but got {})" .format(filename, shape, d.shape)) return d
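A self-contained sketch of the offset arithmetic: each Fortran record begins with a 4-byte length marker, so the memory map starts 4 bytes past the recorded file position. The snippet writes a tiny synthetic record (not a real bpch file) and maps it back:

import struct
import numpy as np

shape, dtype = (3, 2), np.dtype('>f4')            # big-endian float32, Fortran order
payload = np.arange(6, dtype=dtype).reshape(shape, order='F')

with open("fake_record.bin", "wb") as f:          # throwaway file for the demo
    file_position = f.tell()                      # where the record starts
    f.write(struct.pack('>i', payload.nbytes))    # leading Fortran record marker
    f.write(payload.tobytes(order='F'))
    f.write(struct.pack('>i', payload.nbytes))    # trailing record marker

d = np.memmap("fake_record.bin", dtype=dtype, mode='r', shape=shape,
              offset=file_position + 4, order='F')
assert np.array_equal(d, payload)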
Helper function to load the data referenced by this bundle. def _read(self): """ Helper function to load the data referenced by this bundle. """ if self._dask: d = da.from_delayed( delayed(read_from_bpch, )( self.filename, self.file_position, self.shape, self.dtype, self.endian, use_mmap=self._mmap ), self.shape, self.dtype ) else: d = read_from_bpch( self.filename, self.file_position, self.shape, self.dtype, self.endian, use_mmap=self._mmap ) return d
Close this bpch file. def close(self): """ Close this bpch file. """ if not self.fp.closed: for v in list(self.var_data): del self.var_data[v] self.fp.close()
Read the main metadata packaged within a bpch file, indicating the output filetype and its title. def _read_metadata(self): """ Read the main metadata packaged within a bpch file, indicating the output filetype and its title. """ filetype = self.fp.readline().strip() filetitle = self.fp.readline().strip() # Decode to UTF string, if possible try: filetype = str(filetype, 'utf-8') filetitle = str(filetitle, 'utf-8') except: # TODO: Handle this edge-case of converting file metadata more elegantly. pass self.__setattr__('filetype', filetype) self.__setattr__('filetitle', filetitle)
Process the header information (data model / grid spec) def _read_header(self): """ Process the header information (data model / grid spec) """ self._header_pos = self.fp.tell() line = self.fp.readline('20sffii') modelname, res0, res1, halfpolar, center180 = line self._attributes.update({ "modelname": str(modelname, 'utf-8').strip(), "halfpolar": halfpolar, "center180": center180, "res": (res0, res1) }) self.__setattr__('modelname', modelname) self.__setattr__('res', (res0, res1)) self.__setattr__('halfpolar', halfpolar) self.__setattr__('center180', center180) # Re-wind the file self.fp.seek(self._header_pos)
Iterate over the block of this bpch file and return handlers in the form of `BPCHDataBundle`s for access to the data contained therein. def _read_var_data(self): """ Iterate over the block of this bpch file and return handlers in the form of `BPCHDataBundle`s for access to the data contained therein. """ var_bundles = OrderedDict() var_attrs = OrderedDict() n_vars = 0 while self.fp.tell() < self.fsize: var_attr = OrderedDict() # read first and second header lines line = self.fp.readline('20sffii') modelname, res0, res1, halfpolar, center180 = line line = self.fp.readline('40si40sdd40s7i') category_name, number, unit, tau0, tau1, reserved = line[:6] dim0, dim1, dim2, dim3, dim4, dim5, skip = line[6:] var_attr['number'] = number # Decode byte-strings to utf-8 category_name = str(category_name, 'utf-8') var_attr['category'] = category_name.strip() unit = str(unit, 'utf-8') # get additional metadata from tracerinfo / diaginfo try: cat_df = self.diaginfo_df[ self.diaginfo_df.name == category_name.strip() ] # TODO: Safer logic for handling case where more than one # tracer metadata match was made # if len(cat_df > 1): # raise ValueError( # "More than one category matching {} found in " # "diaginfo.dat".format( # category_name.strip() # ) # ) # Safe now to select the only row in the DataFrame cat = cat_df.T.squeeze() tracer_num = int(cat.offset) + int(number) diag_df = self.tracerinfo_df[ self.tracerinfo_df.tracer == tracer_num ] # TODO: Safer logic for handling case where more than one # tracer metadata match was made # if len(diag_df > 1): # raise ValueError( # "More than one tracer matching {:d} found in " # "tracerinfo.dat".format(tracer_num) # ) # Safe now to select only row in the DataFrame diag = diag_df.T.squeeze() diag_attr = diag.to_dict() if not unit.strip(): # unit may be empty in bpch unit = diag_attr['unit'] # but not in tracerinfo var_attr.update(diag_attr) except: diag = {'name': '', 'scale': 1} var_attr.update(diag) var_attr['unit'] = unit vname = diag['name'] fullname = category_name.strip() + "_" + vname # parse metadata, get data or set a data proxy if dim2 == 1: data_shape = (dim0, dim1) # 2D field else: data_shape = (dim0, dim1, dim2) var_attr['original_shape'] = data_shape # Add proxy time dimension to shape data_shape = tuple([1, ] + list(data_shape)) origin = (dim3, dim4, dim5) var_attr['origin'] = origin timelo, timehi = cf.tau2time(tau0), cf.tau2time(tau1) pos = self.fp.tell() # Note that we don't pass a dtype, and assume everything is # single-fp floats with the correct endian, as hard-coded var_bundle = BPCHDataBundle( data_shape, self.endian, self.filename, pos, [timelo, timehi], metadata=var_attr, use_mmap=self.use_mmap, dask_delayed=self.dask_delayed ) self.fp.skipline() # Save the data as a "bundle" for concatenating in the final step if fullname in var_bundles: var_bundles[fullname].append(var_bundle) else: var_bundles[fullname] = [var_bundle, ] var_attrs[fullname] = var_attr n_vars += 1 self.var_data = var_bundles self.var_attrs = var_attrs
Broadcast 1-d array `arr` to `ndim` dimensions by inserting size-1 dimensions after the data (`axis`=1, the default) or before it (`axis`=0), e.g. shape (n,) becomes (n, 1, ...) or (..., 1, n). Useful for 'outer' calculations involving 1-d arrays that are related to different axes on a multidimensional grid. def broadcast_1d_array(arr, ndim, axis=1): """ Broadcast 1-d array `arr` to `ndim` dimensions by inserting size-1 dimensions after the data (`axis`=1, the default) or before it (`axis`=0), e.g. shape (n,) becomes (n, 1, ...) or (..., 1, n). Useful for 'outer' calculations involving 1-d arrays that are related to different axes on a multidimensional grid. """ ext_arr = arr for i in range(ndim - 1): ext_arr = np.expand_dims(ext_arr, axis=axis) return ext_arr
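A quick shape check (broadcast_1d_array is copied from above so this runs standalone); with the default axis=1 the size-1 dimensions trail the data, with axis=0 they are prepended:

import numpy as np

def broadcast_1d_array(arr, ndim, axis=1):
    ext_arr = arr
    for i in range(ndim - 1):
        ext_arr = np.expand_dims(ext_arr, axis=axis)
    return ext_arr

weights = np.array([0.5, 1.0, 0.5])
print(broadcast_1d_array(weights, 3).shape)          # (3, 1, 1): data stays on the first axis
print(broadcast_1d_array(weights, 3, axis=0).shape)  # (1, 1, 3): data ends up on the last axis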
Return the current timestamp in machine local time. Parameters: ----------- time, date : Boolean Flag to include the time or date components, respectively, in the output. fmt : str, optional If passed, will override the time/date choice and use as the format string passed to `strftime`. def get_timestamp(time=True, date=True, fmt=None): """ Return the current timestamp in machine local time. Parameters: ----------- time, date : Boolean Flag to include the time or date components, respectively, in the output. fmt : str, optional If passed, will override the time/date choice and use as the format string passed to `strftime`. """ time_format = "%H:%M:%S" date_format = "%m-%d-%Y" if fmt is None: if time and date: fmt = time_format + " " + date_format elif time: fmt = time_format elif date: fmt = date_format else: raise ValueError("One of `date` or `time` must be True!") return datetime.now().strftime(fmt)
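Example calls; the printed values are illustrative since they depend on the clock.

print(get_timestamp())                            # e.g. '14:02:31 07-15-2024'
print(get_timestamp(time=False))                  # date only, e.g. '07-15-2024'
print(get_timestamp(fmt="%Y-%m-%dT%H:%M:%S"))     # fmt overrides the time/date flags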
This is a temporary hot-fix to handle the way metadata is encoded when we read data directly from bpch files. It removes the 'scale_factor' and 'units' attributes we encode with the data we ingest, converts the 'hydrocarbon' and 'chemical' attributes to binary integers instead of booleans, and removes the 'units' attribute from the "time" dimension since that too is implicitly encoded. In future versions of this library, when upstream issues in decoding data wrapped in dask arrays are fixed, this won't be necessary and will be removed. def fix_attr_encoding(ds): """ This is a temporary hot-fix to handle the way metadata is encoded when we read data directly from bpch files. It removes the 'scale_factor' and 'units' attributes we encode with the data we ingest, converts the 'hydrocarbon' and 'chemical' attributes to binary integers instead of booleans, and removes the 'units' attribute from the "time" dimension since that too is implicitly encoded. In future versions of this library, when upstream issues in decoding data wrapped in dask arrays are fixed, this won't be necessary and will be removed. """ def _maybe_del_attr(da, attr): """ Possibly delete an attribute on a DataArray if it's present """ if attr in da.attrs: del da.attrs[attr] return da def _maybe_decode_attr(da, attr): # TODO: Fix this so that bools get written as attributes just fine """ Possibly coerce an attribute on a DataArray to an easier type to write to disk. """ # bool -> int if (attr in da.attrs) and isinstance(da.attrs[attr], bool): da.attrs[attr] = int(da.attrs[attr]) return da for v in ds.data_vars: da = ds[v] da = _maybe_del_attr(da, 'scale_factor') da = _maybe_del_attr(da, 'units') da = _maybe_decode_attr(da, 'hydrocarbon') da = _maybe_decode_attr(da, 'chemical') # Also delete attributes on time. if hasattr(ds, 'time'): times = ds.time times = _maybe_del_attr(times, 'units') return ds
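A minimal sketch of the clean-up on a toy Dataset, assuming fix_attr_encoding is in scope; the variable name and attribute values are placeholders, not real bpch output.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"IJ_AVG_S_O3": (("time", "lon"), np.zeros((1, 4)),
                     {"units": "ppbv", "scale_factor": 1.0, "hydrocarbon": False})},
    coords={"time": ("time", [0], {"units": "hours since 1985-01-01"})},
)
clean = fix_attr_encoding(ds)
print(clean["IJ_AVG_S_O3"].attrs)   # {'hydrocarbon': 0}: bool coerced, units/scale_factor dropped
print(clean.time.attrs)             # {}: implicit time units removed before writing to disk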
Shell sequence to be run after the command output. The ``command_status`` should be in the range 0-255. def after_output(command_status): """ Shell sequence to be run after the command output. The ``command_status`` should be in the range 0-255. """ if command_status not in range(256): raise ValueError("command_status must be an integer in the range 0-255") sys.stdout.write(AFTER_OUTPUT.format(command_status=command_status)) # Flushing is important as the command timing feature may be based on # AFTER_OUTPUT in the future. sys.stdout.flush()
Write your forwards methods here. def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for entry_title in orm.NewsEntryTitle.objects.all(): entry = NewsEntry.objects.get(pk=entry_title.entry.pk) entry.translate(entry_title.language) entry.title = entry_title.title entry.slug = entry_title.slug entry.is_published = entry_title.is_published entry.save()