Render entities in text.
text (unicode): Original text.
spans (list): Individual entity spans and their start, end and label.
title (unicode or None): Document title set in Doc.user_data['title'].
def render_ents(self, text, spans, title):
"""Render entities in text.
text (unicode): Original text.
spans (list): Individual entity spans and their start, end and label.
title (unicode or None): Document title set in Doc.user_data['title'].
"""
markup = ""
offset = 0
for span in spans:
label = span["label"]
start = span["start"]
end = span["end"]
entity = escape_html(text[start:end])
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
if self.ents is None or label.upper() in self.ents:
color = self.colors.get(label.upper(), self.default_color)
ent_settings = {"label": label, "text": entity, "bg": color}
if self.direction == "rtl":
markup += TPL_ENT_RTL.format(**ent_settings)
else:
markup += TPL_ENT.format(**ent_settings)
else:
markup += entity
offset = end
markup += escape_html(text[offset:])
markup = TPL_ENTS.format(content=markup, dir=self.direction)
if title:
markup = TPL_TITLE.format(title=title) + markup
return markup
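For context, render_ents is the core of displaCy's entity renderer. A minimal usage sketch, assuming the en_core_web_sm model is installed:
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")  # assumed model
doc = nlp("Apple is looking at buying a U.K. startup.")
# displacy.render builds the markup via EntityRenderer.render_ents
html = displacy.render(doc, style="ent", page=False)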
|
Merge noun chunks into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged noun chunks.
DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
def merge_noun_chunks(doc):
"""Merge noun chunks into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged noun chunks.
DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
"""
if not doc.is_parsed:
return doc
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
attrs = {"tag": np.root.tag, "dep": np.root.dep}
retokenizer.merge(np, attrs=attrs)
return doc
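A usage sketch for the spaCy v2 pipeline API, assuming an English model with a parser is installed:
import spacy

nlp = spacy.load("en_core_web_sm")  # assumed model
nlp.add_pipe(nlp.create_pipe("merge_noun_chunks"))
doc = nlp("The quick brown fox jumps over the lazy dog.")
print([t.text for t in doc])  # noun chunks such as "The quick brown fox" are single tokens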
|
Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
def merge_entities(doc):
"""Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
"""
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs)
return doc
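Continuing the sketch above, entity merging works the same way, assuming the model provides an NER component:
nlp.add_pipe(nlp.create_pipe("merge_entities"))
doc = nlp("San Francisco is in California.")
print([(t.text, t.ent_type_) for t in doc])  # "San Francisco" becomes a single GPE token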
|
Merge subtokens into a single token.
doc (Doc): The Doc object.
label (unicode): The subtoken dependency label.
RETURNS (Doc): The Doc object with merged subtokens.
DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
def merge_subtokens(doc, label="subtok"):
"""Merge subtokens into a single token.
doc (Doc): The Doc object.
label (unicode): The subtoken dependency label.
RETURNS (Doc): The Doc object with merged subtokens.
DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
"""
merger = Matcher(doc.vocab)
merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}])
matches = merger(doc)
spans = [doc[start : end + 1] for _, start, end in matches]
with doc.retokenize() as retokenizer:
for span in spans:
retokenizer.merge(span)
return doc
|
Train or update a spaCy model. Requires data to be formatted in spaCy's
JSON format. To convert data from other formats, use the `spacy convert`
command.
def train(
lang,
output_path,
train_path,
dev_path,
raw_text=None,
base_model=None,
pipeline="tagger,parser,ner",
vectors=None,
n_iter=30,
n_early_stopping=None,
n_examples=0,
use_gpu=-1,
version="0.0.0",
meta_path=None,
init_tok2vec=None,
parser_multitasks="",
entity_multitasks="",
noise_level=0.0,
eval_beam_widths="",
gold_preproc=False,
learn_tokens=False,
verbose=False,
debug=False,
):
"""
Train or update a spaCy model. Requires data to be formatted in spaCy's
JSON format. To convert data from other formats, use the `spacy convert`
command.
"""
msg = Printer()
util.fix_random_seed()
util.set_env_log(verbose)
# Make sure all files and paths exists if they are needed
train_path = util.ensure_path(train_path)
dev_path = util.ensure_path(dev_path)
meta_path = util.ensure_path(meta_path)
output_path = util.ensure_path(output_path)
if raw_text is not None:
raw_text = list(srsly.read_jsonl(raw_text))
if not train_path or not train_path.exists():
msg.fail("Training data not found", train_path, exits=1)
if not dev_path or not dev_path.exists():
msg.fail("Development data not found", dev_path, exits=1)
if meta_path is not None and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path) if meta_path else {}
if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:
msg.warn(
"Output directory is not empty",
"This can lead to unintended side effects when saving the model. "
"Please use an empty directory or a different path instead. If "
"the specified output path doesn't exist, the directory will be "
"created for you.",
)
if not output_path.exists():
output_path.mkdir()
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 100.0),
util.env_opt("batch_to", 1000.0),
util.env_opt("batch_compound", 1.001),
)
if not eval_beam_widths:
eval_beam_widths = [1]
else:
eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(",")]
if 1 not in eval_beam_widths:
eval_beam_widths.append(1)
eval_beam_widths.sort()
has_beam_widths = eval_beam_widths != [1]
# Set up the base model and pipeline. If a base model is specified, load
# the model and make sure the pipeline matches the pipeline setting. If
    # training starts from a blank model, initialize the language class.
pipeline = [p.strip() for p in pipeline.split(",")]
msg.text("Training pipeline: {}".format(pipeline))
if base_model:
msg.text("Starting with base model '{}'".format(base_model))
nlp = util.load_model(base_model)
if nlp.lang != lang:
msg.fail(
"Model language ('{}') doesn't match language specified as "
"`lang` argument ('{}') ".format(nlp.lang, lang),
exits=1,
)
other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipeline]
nlp.disable_pipes(*other_pipes)
for pipe in pipeline:
if pipe not in nlp.pipe_names:
nlp.add_pipe(nlp.create_pipe(pipe))
else:
msg.text("Starting with blank model '{}'".format(lang))
lang_cls = util.get_lang_class(lang)
nlp = lang_cls()
for pipe in pipeline:
nlp.add_pipe(nlp.create_pipe(pipe))
if learn_tokens:
nlp.add_pipe(nlp.create_pipe("merge_subtokens"))
if vectors:
msg.text("Loading vector from model '{}'".format(vectors))
_load_vectors(nlp, vectors)
# Multitask objectives
multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)]
for pipe_name, multitasks in multitask_options:
if multitasks:
if pipe_name not in pipeline:
msg.fail(
"Can't use multitask objective without '{}' in the "
"pipeline".format(pipe_name)
)
pipe = nlp.get_pipe(pipe_name)
for objective in multitasks.split(","):
pipe.add_multitask_objective(objective)
# Prepare training corpus
msg.text("Counting training words (limit={})".format(n_examples))
corpus = GoldCorpus(train_path, dev_path, limit=n_examples)
n_train_words = corpus.count_train()
if base_model:
# Start with an existing model, use default optimizer
optimizer = create_default_optimizer(Model.ops)
else:
# Start with a blank model, call begin_training
optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
nlp._optimizer = None
# Load in pre-trained weights
if init_tok2vec is not None:
components = _load_pretrained_tok2vec(nlp, init_tok2vec)
msg.text("Loaded pretrained tok2vec for: {}".format(components))
# fmt: off
row_head = ["Itn", "Dep Loss", "NER Loss", "UAS", "NER P", "NER R", "NER F", "Tag %", "Token %", "CPU WPS", "GPU WPS"]
row_widths = [3, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7]
if has_beam_widths:
row_head.insert(1, "Beam W.")
row_widths.insert(1, 7)
row_settings = {"widths": row_widths, "aligns": tuple(["r" for i in row_head]), "spacing": 2}
# fmt: on
print("")
msg.row(row_head, **row_settings)
msg.row(["-" * width for width in row_settings["widths"]], **row_settings)
try:
iter_since_best = 0
best_score = 0.0
for i in range(n_iter):
train_docs = corpus.train_docs(
nlp, noise_level=noise_level, gold_preproc=gold_preproc, max_length=0
)
if raw_text:
random.shuffle(raw_text)
raw_batches = util.minibatch(
(nlp.make_doc(rt["text"]) for rt in raw_text), size=8
)
words_seen = 0
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in util.minibatch_by_words(train_docs, size=batch_sizes):
if not batch:
continue
docs, golds = zip(*batch)
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
if not int(os.environ.get("LOG_FRIENDLY", 0)):
pbar.update(sum(len(doc) for doc in docs))
words_seen += sum(len(doc) for doc in docs)
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
for beam_width in eval_beam_widths:
for name, component in nlp_loaded.pipeline:
if hasattr(component, "cfg"):
component.cfg["beam_width"] = beam_width
dev_docs = list(
corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc)
)
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, debug)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
for name, component in nlp_loaded.pipeline:
if hasattr(component, "cfg"):
component.cfg["beam_width"] = beam_width
dev_docs = list(
corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
srsly.write_json(acc_loc, scorer.scores)
# Update model meta.json
meta["lang"] = nlp.lang
meta["pipeline"] = nlp.pipe_names
meta["spacy_version"] = ">=%s" % about.__version__
if beam_width == 1:
meta["speed"] = {
"nwords": nwords,
"cpu": cpu_wps,
"gpu": gpu_wps,
}
meta["accuracy"] = scorer.scores
else:
meta.setdefault("beam_accuracy", {})
meta.setdefault("beam_speed", {})
meta["beam_accuracy"][beam_width] = scorer.scores
meta["beam_speed"][beam_width] = {
"nwords": nwords,
"cpu": cpu_wps,
"gpu": gpu_wps,
}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
"name": nlp.vocab.vectors.name,
}
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
meta_loc = output_path / ("model%d" % i) / "meta.json"
srsly.write_json(meta_loc, meta)
util.set_env_log(verbose)
progress = _get_progress(
i,
losses,
scorer.scores,
beam_width=beam_width if has_beam_widths else None,
cpu_wps=cpu_wps,
gpu_wps=gpu_wps,
)
msg.row(progress, **row_settings)
# Early stopping
if n_early_stopping is not None:
current_score = _score_for_model(meta)
if current_score < best_score:
iter_since_best += 1
else:
iter_since_best = 0
best_score = current_score
if iter_since_best >= n_early_stopping:
msg.text(
"Early stopping, best iteration "
"is: {}".format(i - iter_since_best)
)
msg.text(
"Best score = {}; Final iteration "
"score = {}".format(best_score, current_score)
)
break
finally:
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
msg.good("Saved model to output directory", final_model_path)
with msg.loading("Creating best model..."):
best_model_path = _collate_best_model(meta, output_path, nlp.pipe_names)
msg.good("Created best model", best_model_path)
|
Returns the mean score across the pipeline's tasks, for use in early stopping.
def _score_for_model(meta):
""" Returns mean score between tasks in pipeline that can be used for early stopping. """
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in pipes:
mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
return sum(mean_acc) / len(mean_acc)
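A worked example with a hypothetical meta dict for a tagger and parser pipeline:
meta = {
    "pipeline": ["tagger", "parser"],
    "accuracy": {"tags_acc": 92.0, "uas": 85.0, "las": 83.0},
}
print(_score_for_model(meta))  # (92.0 + (85.0 + 83.0) / 2) / 2 == 88.0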
|
Load pre-trained weights for the 'token-to-vector' part of the component
models, which is typically a CNN. See 'spacy pretrain'. Experimental.
def _load_pretrained_tok2vec(nlp, loc):
"""Load pre-trained weights for the 'token-to-vector' part of the component
models, which is typically a CNN. See 'spacy pretrain'. Experimental.
"""
with loc.open("rb") as file_:
weights_data = file_.read()
loaded = []
for name, component in nlp.pipeline:
if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
component.tok2vec.from_bytes(weights_data)
loaded.append(name)
return loaded
|
Convert CoNLL-U files into JSON format for use with the train CLI.
The use_morphology parameter enables appending morphology to tags, which is
useful for languages such as Spanish, where UD tags are not very rich.
NER tags are extracted if available and converted so that they follow
BILUO and the Wikipedia scheme.
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None):
"""
    Convert CoNLL-U files into JSON format for use with the train CLI.
    The use_morphology parameter enables appending morphology to tags, which is
    useful for languages such as Spanish, where UD tags are not very rich.
    NER tags are extracted if available and converted so that they follow
    BILUO and the Wikipedia scheme.
"""
# by @dvsrepo, via #11 explosion/spacy-dev-resources
# by @katarkor
docs = []
sentences = []
conll_tuples = read_conllx(input_data, use_morphology=use_morphology)
checked_for_ner = False
has_ner_tags = False
for i, (raw_text, tokens) in enumerate(conll_tuples):
sentence, brackets = tokens[0]
if not checked_for_ner:
has_ner_tags = is_ner(sentence[5][0])
checked_for_ner = True
sentences.append(generate_sentence(sentence, has_ner_tags))
        # Real-sized documents could be extracted using the comments in the
        # CoNLL-U document
if len(sentences) % n_sents == 0:
doc = create_doc(sentences, i)
docs.append(doc)
sentences = []
return docs
|
Check the 10th column of the first token to determine whether the file
contains NER tags.
def is_ner(tag):
"""
    Check the 10th column of the first token to determine whether the file
    contains NER tags.
"""
tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
if tag_match:
return True
elif tag == "O":
return True
else:
return False
|
Simplify tags obtained from the dataset in order to follow Wikipedia
scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags, while
'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all remaining tags to
'MISC'.
def simplify_tags(iob):
"""
Simplify tags obtained from the dataset in order to follow Wikipedia
scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags, while
'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all remaining tags to
'MISC'.
"""
new_iob = []
for tag in iob:
tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
if tag_match:
prefix = tag_match.group(1)
suffix = tag_match.group(2)
if suffix == "GPE_LOC":
suffix = "LOC"
elif suffix == "GPE_ORG":
suffix = "ORG"
elif suffix != "PER" and suffix != "LOC" and suffix != "ORG":
suffix = "MISC"
tag = prefix + "-" + suffix
new_iob.append(tag)
return new_iob
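A quick illustration of the mapping, with hypothetical input tags:
print(simplify_tags(["B-GPE_LOC", "I-GPE_ORG", "U-EVT", "O"]))
# ['B-LOC', 'I-ORG', 'U-MISC', 'O']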
|
Print info about the spaCy installation. If a model shortcut link is
specified as an argument, print model information. The --markdown flag
prints details in Markdown for easy copy-pasting to GitHub issues.
def info(model=None, markdown=False, silent=False):
"""
    Print info about the spaCy installation. If a model shortcut link is
    specified as an argument, print model information. The --markdown flag
    prints details in Markdown for easy copy-pasting to GitHub issues.
"""
msg = Printer()
if model:
if util.is_package(model):
model_path = util.get_package_path(model)
else:
model_path = util.get_data_path() / model
meta_path = model_path / "meta.json"
if not meta_path.is_file():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path)
if model_path.resolve() != model_path:
meta["link"] = path2str(model_path)
meta["source"] = path2str(model_path.resolve())
else:
meta["source"] = path2str(model_path)
if not silent:
title = "Info about model '{}'".format(model)
model_meta = {
k: v for k, v in meta.items() if k not in ("accuracy", "speed")
}
if markdown:
print_markdown(model_meta, title=title)
else:
msg.table(model_meta, title=title)
return meta
data = {
"spaCy version": about.__version__,
"Location": path2str(Path(__file__).parent.parent),
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Models": list_models(),
}
if not silent:
title = "Info about spaCy"
if markdown:
print_markdown(data, title=title)
else:
msg.table(data, title=title)
return data
|
Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
def print_markdown(data, title=None):
"""Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
"""
markdown = []
for key, value in data.items():
if isinstance(value, basestring_) and Path(value).exists():
continue
markdown.append("* **{}:** {}".format(key, unicode_(value)))
if title:
print("\n## {}".format(title))
print("\n{}\n".format("\n".join(markdown)))
|
Load the model, set up the pipeline and train the parser.
def main(model=None, output_dir=None, n_iter=15):
"""Load the model, set up the pipeline and train the parser."""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# We'll use the built-in dependency parser class, but we want to create a
# fresh instance – just in case.
if "parser" in nlp.pipe_names:
nlp.remove_pipe("parser")
parser = nlp.create_pipe("parser")
nlp.add_pipe(parser, first=True)
for text, annotations in TRAIN_DATA:
for dep in annotations.get("deps", []):
parser.add_label(dep)
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"]
with nlp.disable_pipes(*other_pipes): # only train parser
optimizer = nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
print("Losses", losses)
# test the trained model
test_model(nlp)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
test_model(nlp2)
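The snippet above assumes TRAIN_DATA and test_model are defined elsewhere. A minimal, hypothetical TRAIN_DATA entry for custom dependency labels (heads are token indices):
TRAIN_DATA = [
    (
        "find a hotel with good wifi",
        {
            "heads": [0, 2, 0, 5, 5, 2],  # hypothetical projective parse
            "deps": ["ROOT", "-", "PLACE", "-", "QUALITY", "ATTRIBUTE"],
        },
    ),
]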
|
Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
def get_pipe(self, name):
"""Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
"""
for pipe_name, component in self.pipeline:
if pipe_name == name:
return component
raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
|
Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
DOCS: https://spacy.io/api/language#create_pipe
def create_pipe(self, name, config=dict()):
"""Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
DOCS: https://spacy.io/api/language#create_pipe
"""
if name not in self.factories:
if name == "sbd":
raise KeyError(Errors.E108.format(name=name))
else:
raise KeyError(Errors.E002.format(name=name))
factory = self.factories[name]
return factory(self, **config)
|
Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last".
component (callable): The pipeline component.
name (unicode): Name of pipeline component. Overwrites existing
component.name attribute if available. If no name is set and
the component exposes no name attribute, component.__name__ is
used. An error is raised if a name already exists in the pipeline.
before (unicode): Component name to insert component directly before.
after (unicode): Component name to insert component directly after.
first (bool): Insert component first / not first in the pipeline.
last (bool): Insert component last / not last in the pipeline.
DOCS: https://spacy.io/api/language#add_pipe
def add_pipe(
self, component, name=None, before=None, after=None, first=None, last=None
):
"""Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last".
component (callable): The pipeline component.
name (unicode): Name of pipeline component. Overwrites existing
component.name attribute if available. If no name is set and
the component exposes no name attribute, component.__name__ is
used. An error is raised if a name already exists in the pipeline.
before (unicode): Component name to insert component directly before.
after (unicode): Component name to insert component directly after.
first (bool): Insert component first / not first in the pipeline.
last (bool): Insert component last / not last in the pipeline.
DOCS: https://spacy.io/api/language#add_pipe
"""
if not hasattr(component, "__call__"):
msg = Errors.E003.format(component=repr(component), name=name)
if isinstance(component, basestring_) and component in self.factories:
msg += Errors.E004.format(component=component)
raise ValueError(msg)
if name is None:
if hasattr(component, "name"):
name = component.name
elif hasattr(component, "__name__"):
name = component.__name__
elif hasattr(component, "__class__") and hasattr(
component.__class__, "__name__"
):
name = component.__class__.__name__
else:
name = repr(component)
if name in self.pipe_names:
raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names))
if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2:
raise ValueError(Errors.E006)
pipe = (name, component)
if last or not any([first, before, after]):
self.pipeline.append(pipe)
elif first:
self.pipeline.insert(0, pipe)
elif before and before in self.pipe_names:
self.pipeline.insert(self.pipe_names.index(before), pipe)
elif after and after in self.pipe_names:
self.pipeline.insert(self.pipe_names.index(after) + 1, pipe)
else:
raise ValueError(
Errors.E001.format(name=before or after, opts=self.pipe_names)
)
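A usage sketch with a custom component, assuming nlp is a loaded Language object:
def log_doc_length(doc):
    # any callable that takes and returns a Doc is a valid component
    print("Doc length:", len(doc))
    return doc

nlp.add_pipe(log_doc_length, name="log_doc_length", first=True)
print(nlp.pipe_names)  # ['log_doc_length', ...]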
|
Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
def replace_pipe(self, name, component):
"""Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
self.pipeline[self.pipe_names.index(name)] = (name, component)
|
Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
def rename_pipe(self, old_name, new_name):
"""Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
"""
if old_name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
if new_name in self.pipe_names:
raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
i = self.pipe_names.index(old_name)
self.pipeline[i] = (new_name, self.pipeline[i][1])
|
Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
def remove_pipe(self, name):
"""Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
return self.pipeline.pop(self.pipe_names.index(name))
|
Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
    drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
golds = gold_objs
docs = doc_objs
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
|
Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pre-trained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
def rehearse(self, docs, sgd=None, losses=None, config=None):
"""Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pre-trained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
"""
# TODO: document
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
docs = list(docs)
for i, doc in enumerate(docs):
if isinstance(doc, basestring_):
docs[i] = self.make_doc(doc)
pipes = list(self.pipeline)
random.shuffle(pipes)
if config is None:
config = {}
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
for name, proc in pipes:
if not hasattr(proc, "rehearse"):
continue
grads = {}
proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
return losses
|
Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
"""
for name, proc in self.pipeline:
if hasattr(proc, "preprocess_gold"):
docs_golds = proc.preprocess_gold(docs_golds)
for doc, gold in docs_golds:
yield doc, gold
|
Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word] # noqa: F841
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
kwargs = component_cfg.get(name, {})
kwargs.update(cfg)
proc.begin_training(
get_gold_tuples,
pipeline=self.pipeline,
sgd=self._optimizer,
**kwargs
)
return self._optimizer
|
Continue training a pre-trained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
def resume_training(self, sgd=None, **cfg):
"""Continue training a pre-trained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
"""
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "_rehearsal_model"):
proc._rehearsal_model = deepcopy(proc.model)
return self._optimizer
|
Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
def use_params(self, params, **cfg):
"""Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
"""
contexts = [
pipe.use_params(params)
for name, pipe in self.pipeline
if hasattr(pipe, "use_params")
]
# TODO: Having trouble with contextlib
# Workaround: these aren't actually context managers atm.
for context in contexts:
try:
next(context)
except StopIteration:
pass
yield
for context in contexts:
try:
next(context)
except StopIteration:
pass
|
Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
def pipe(
self,
texts,
as_tuples=False,
n_threads=-1,
batch_size=1000,
disable=[],
cleanup=False,
component_cfg=None,
):
"""Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
"""
if n_threads != -1:
deprecation_warning(Warnings.W016)
if as_tuples:
text_context1, text_context2 = itertools.tee(texts)
texts = (tc[0] for tc in text_context1)
contexts = (tc[1] for tc in text_context2)
docs = self.pipe(
texts,
batch_size=batch_size,
disable=disable,
component_cfg=component_cfg,
)
for doc, context in izip(docs, contexts):
yield (doc, context)
return
docs = (self.make_doc(text) for text in texts)
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if name in disable:
continue
kwargs = component_cfg.get(name, {})
# Allow component_cfg to overwrite the top-level kwargs.
kwargs.setdefault("batch_size", batch_size)
if hasattr(proc, "pipe"):
docs = proc.pipe(docs, **kwargs)
else:
# Apply the function, but yield the doc
docs = _pipe(proc, docs, kwargs)
# Track weakrefs of "recent" documents, so that we can see when they
# expire from memory. When they do, we know we don't need old strings.
# This way, we avoid maintaining an unbounded growth in string entries
# in the string store.
recent_refs = weakref.WeakSet()
old_refs = weakref.WeakSet()
# Keep track of the original string data, so that if we flush old strings,
# we can recover the original ones. However, we only want to do this if we're
# really adding strings, to save up-front costs.
original_strings_data = None
nr_seen = 0
for doc in docs:
yield doc
if cleanup:
recent_refs.add(doc)
if nr_seen < 10000:
old_refs.add(doc)
nr_seen += 1
elif len(old_refs) == 0:
old_refs, recent_refs = recent_refs, old_refs
if original_strings_data is None:
original_strings_data = list(self.vocab.strings)
else:
keys, strings = self.vocab.strings._cleanup_stale_strings(
original_strings_data
)
self.vocab._reset_cache(keys, strings)
self.tokenizer._reset_cache(keys)
nr_seen = 0
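A usage sketch, assuming nlp is a loaded pipeline:
texts = ["First document.", "Second document."]
for doc in nlp.pipe(texts, batch_size=50, disable=["parser"]):
    print(doc.text)

# with as_tuples=True, context objects are passed through unchanged
data = [("A text.", {"id": 1}), ("Another text.", {"id": 2})]
for doc, context in nlp.pipe(data, as_tuples=True):
    print(context["id"], doc.text)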
|
Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
def to_disk(self, path, exclude=tuple(), disable=None):
"""Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
serializers = OrderedDict()
serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"])
serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta))
for name, proc in self.pipeline:
if not hasattr(proc, "name"):
continue
if name in exclude:
continue
if not hasattr(proc, "to_disk"):
continue
serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
serializers["vocab"] = lambda p: self.vocab.to_disk(p)
util.to_disk(path, serializers, exclude)
|
Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
def from_disk(self, path, exclude=tuple(), disable=None):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
deserializers = OrderedDict()
deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, exclude=["vocab"])
if not (path / "vocab").exists() and "vocab" not in exclude:
# Convert to list here in case exclude is (default) tuple
exclude = list(exclude) + ["vocab"]
util.from_disk(path, deserializers, exclude)
self._path = path
return self
|
Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
"""Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
serializers = OrderedDict()
serializers["vocab"] = lambda: self.vocab.to_bytes()
serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "to_bytes"):
continue
serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
return util.to_bytes(serializers, exclude)
|
Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
deserializers = OrderedDict()
deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
deserializers["vocab"] = lambda b: self.vocab.from_bytes(b) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(b, exclude=["vocab"])
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[name] = lambda b, proc=proc: proc.from_bytes(b, exclude=["vocab"])
exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
util.from_bytes(bytes_data, deserializers, exclude)
return self
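A round-trip sketch with a blank pipeline:
import spacy

nlp = spacy.blank("en")
data = nlp.to_bytes()
nlp2 = spacy.blank("en")
nlp2.from_bytes(data)  # nlp2 now shares nlp's vocab and meta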
|
Restore the pipeline to its state when DisabledPipes was created.
def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected))
self[:] = []
|
Yields all available rules that are enabled.
:type rules_paths: [Path]
:rtype: Iterable[Rule]
def get_loaded_rules(rules_paths):
"""Yields all available rules.
:type rules_paths: [Path]
:rtype: Iterable[Rule]
"""
for path in rules_paths:
if path.name != '__init__.py':
rule = Rule.from_path(path)
if rule.is_enabled:
yield rule
|
Yields all rules import paths.
:rtype: Iterable[Path]
def get_rules_import_paths():
"""Yields all rules import paths.
:rtype: Iterable[Path]
"""
# Bundled rules:
yield Path(__file__).parent.joinpath('rules')
# Rules defined by user:
yield settings.user_dir.joinpath('rules')
# Packages with third-party rules:
for path in sys.path:
for contrib_module in Path(path).glob('thefuck_contrib_*'):
contrib_rules = contrib_module.joinpath('rules')
if contrib_rules.is_dir():
yield contrib_rules
|
Returns all enabled rules.
:rtype: [Rule]
def get_rules():
"""Returns all enabled rules.
:rtype: [Rule]
"""
paths = [rule_path for path in get_rules_import_paths()
for rule_path in sorted(path.glob('*.py'))]
return sorted(get_loaded_rules(paths),
key=lambda rule: rule.priority)
|
Yields sorted commands without duplicates.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: Iterable[thefuck.types.CorrectedCommand]
def organize_commands(corrected_commands):
"""Yields sorted commands without duplicates.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
try:
first_command = next(corrected_commands)
yield first_command
except StopIteration:
return
without_duplicates = {
command for command in sorted(
corrected_commands, key=lambda command: command.priority)
if command != first_command}
sorted_commands = sorted(
without_duplicates,
key=lambda corrected_command: corrected_command.priority)
    logs.debug(u'Corrected commands: {}'.format(
        ', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands)))
for command in sorted_commands:
yield command
|
Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand]
def get_corrected_commands(command):
"""Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
corrected_commands = (
corrected for rule in get_rules()
if rule.is_match(command)
for corrected in rule.get_corrected_commands(command))
return organize_commands(corrected_commands)
|
Fixes the previous command. Used when `thefuck` is called without arguments.
def fix_command(known_args):
"""Fixes previous command. Used when `thefuck` called without arguments."""
settings.init(known_args)
with logs.debug_time('Total'):
logs.debug(u'Run with settings: {}'.format(pformat(settings)))
raw_command = _get_raw_command(known_args)
try:
command = types.Command.from_raw_script(raw_command)
except EmptyCommand:
logs.debug('Empty command, nothing to do')
return
corrected_commands = get_corrected_commands(command)
selected_command = select_command(corrected_commands)
if selected_command:
selected_command.run(command)
else:
sys.exit(1)
|
Gets command output from shell logger.
def get_output(script):
"""Gets command output from shell logger."""
with logs.debug_time(u'Read output from external shell logger'):
commands = _get_last_n(const.SHELL_LOGGER_LIMIT)
for command in commands:
if command['command'] == script:
lines = _get_output_lines(command['output'])
output = '\n'.join(lines).strip()
return output
else:
logs.warn("Output isn't available in shell logger")
return None
|
Yields prepared history entries.
def _get_history_lines(self):
"""Returns list of history entries."""
history_file_name = self._get_history_file_name()
if os.path.isfile(history_file_name):
with io.open(history_file_name, 'r',
encoding='utf-8', errors='ignore') as history_file:
lines = history_file.readlines()
if settings.history_limit:
lines = lines[-settings.history_limit:]
for line in lines:
prepared = self._script_from_history(line) \
.strip()
if prepared:
yield prepared
|
Split the command using shell-like syntax.
def split_command(self, command):
"""Split the command using shell-like syntax."""
encoded = self.encode_utf8(command)
try:
splitted = [s.replace("??", "\\ ") for s in shlex.split(encoded.replace('\\ ', '??'))]
except ValueError:
splitted = encoded.split(' ')
return self.decode_utf8(splitted)
|
Return a shell-escaped version of the string s.
def quote(self, s):
"""Return a shell-escaped version of the string s."""
if six.PY2:
from pipes import quote
else:
from shlex import quote
return quote(s)
|
Returns the name and version of the current shell
def info(self):
"""Returns the name and version of the current shell"""
proc = Popen(['fish', '--version'],
stdout=PIPE, stderr=DEVNULL)
version = proc.stdout.read().decode('utf-8').split()[-1]
return u'Fish Shell {}'.format(version)
|
Puts command script to shell history.
def _put_to_history(self, command_script):
"""Puts command script to shell history."""
history_file_name = self._get_history_file_name()
if os.path.isfile(history_file_name):
with open(history_file_name, 'a') as history:
entry = self._get_history_line(command_script)
if six.PY2:
history.write(entry.encode('utf-8'))
else:
history.write(entry)
|
Get the default brew commands from the local environment.
def _get_brew_commands(brew_path_prefix):
"""To get brew default commands on local environment"""
brew_cmd_path = brew_path_prefix + BREW_CMD_PATH
return [name[:-3] for name in os.listdir(brew_cmd_path)
if name.endswith(('.rb', '.sh'))]
|
Get tap-specific commands.
https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115
def _get_brew_tap_specific_commands(brew_path_prefix):
"""To get tap's specific commands
https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115"""
commands = []
brew_taps_path = brew_path_prefix + TAP_PATH
for user in _get_directory_names_only(brew_taps_path):
taps = _get_directory_names_only(brew_taps_path + '/%s' % user)
# Brew Taps's naming rule
# https://github.com/Homebrew/homebrew/blob/master/share/doc/homebrew/brew-tap.md#naming-conventions-and-limitations
taps = (tap for tap in taps if tap.startswith('homebrew-'))
for tap in taps:
tap_cmd_path = brew_taps_path + TAP_CMD_PATH % (user, tap)
if os.path.isdir(tap_cmd_path):
commands += (name.replace('brew-', '').replace('.rb', '')
for name in os.listdir(tap_cmd_path)
if _is_brew_tap_cmd_naming(name))
return commands
|
Returns the name and version of the current shell
def info(self):
"""Returns the name and version of the current shell"""
proc = Popen(['zsh', '-c', 'echo $ZSH_VERSION'],
stdout=PIPE, stderr=DEVNULL)
version = proc.stdout.read().decode('utf-8').strip()
return u'ZSH {}'.format(version)
|
Resolves git aliases and supports testing for both git and hub.
def git_support(fn, command):
"""Resolves git aliases and supports testing for both git and hub."""
# supports GitHub's `hub` command
# which is recommended to be used with `alias git=hub`
# but at this point, shell aliases have already been resolved
if not is_app(command, 'git', 'hub'):
return False
    # perform git alias expansion
if 'trace: alias expansion:' in command.output:
search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
command.output)
alias = search.group(1)
        # by default git quotes everything, for example:
        #     'commit' '--amend'
        # which is surprising and makes it hard to test for
        # e.g. 'git commit'
expansion = ' '.join(shell.quote(part)
for part in shell.split_command(search.group(2)))
new_script = command.script.replace(alias, expansion)
command = command.update(script=new_script)
return fn(command)
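In thefuck's rule modules this wrapper is applied as a decorator (it is defined with the decorator library, which is why it receives both fn and command); a sketch of a rule using it:
from thefuck.specific.git import git_support

@git_support
def match(command):
    # runs against the alias-expanded command
    return 'push' in command.script_parts and 'set-upstream' in command.output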
|
Yields actions for pressed keys.
def read_actions():
"""Yields actions for pressed keys."""
while True:
key = get_key()
# Handle arrows, j/k (qwerty), and n/e (colemak)
if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'):
yield const.ACTION_PREVIOUS
elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'):
yield const.ACTION_NEXT
elif key in (const.KEY_CTRL_C, 'q'):
yield const.ACTION_ABORT
elif key in ('\n', '\r'):
yield const.ACTION_SELECT
|
Returns:
- the first command when confirmation disabled;
- None when ctrl+c pressed;
- selected command.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: thefuck.types.CorrectedCommand | None
def select_command(corrected_commands):
"""Returns:
- the first command when confirmation disabled;
- None when ctrl+c pressed;
- selected command.
:type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
:rtype: thefuck.types.CorrectedCommand | None
"""
try:
selector = CommandSelector(corrected_commands)
except NoRuleMatched:
logs.failed('No fucks given' if get_alias() == 'fuck'
else 'Nothing found')
return
if not settings.require_confirmation:
logs.show_corrected_command(selector.value)
return selector.value
logs.confirm_text(selector.value)
for action in read_actions():
if action == const.ACTION_SELECT:
sys.stderr.write('\n')
return selector.value
elif action == const.ACTION_ABORT:
logs.failed('\nAborted')
return
elif action == const.ACTION_PREVIOUS:
selector.previous()
logs.confirm_text(selector.value)
elif action == const.ACTION_NEXT:
selector.next()
logs.confirm_text(selector.value)
|
Create a spawned process.
Modified version of pty.spawn with terminal size support.
def _spawn(shell, master_read):
"""Create a spawned process.
Modified version of pty.spawn with terminal size support.
"""
pid, master_fd = pty.fork()
if pid == pty.CHILD:
os.execlp(shell, shell)
try:
mode = tty.tcgetattr(pty.STDIN_FILENO)
tty.setraw(pty.STDIN_FILENO)
restore = True
except tty.error: # This is the same as termios.error
restore = False
_set_pty_size(master_fd)
signal.signal(signal.SIGWINCH, lambda *_: _set_pty_size(master_fd))
try:
pty._copy(master_fd, master_read, pty._read)
except OSError:
if restore:
tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
return os.waitpid(pid, 0)[1]
|
Logs shell output to `output`.
Works like the unix `script` command with the `-f` flag.
def shell_logger(output):
"""Logs shell output to the `output`.
Works like unix script command with `-f` flag.
"""
if not os.environ.get('SHELL'):
logs.warn("Shell logger doesn't support your platform.")
sys.exit(1)
fd = os.open(output, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
os.write(fd, b'\x00' * const.LOG_SIZE_IN_BYTES)
buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_WRITE)
return_code = _spawn(os.environ['SHELL'], partial(_read, buffer))
sys.exit(return_code)
|
Get output of the script.
:param script: Console script.
:type script: str
:param expanded: Console script with expanded aliases.
:type expanded: str
:rtype: str
def get_output(script, expanded):
"""Get output of the script.
:param script: Console script.
:type script: str
:param expanded: Console script with expanded aliases.
:type expanded: str
:rtype: str
"""
if shell_logger.is_available():
return shell_logger.get_output(script)
if settings.instant_mode:
return read_log.get_output(script)
else:
return rerun.get_output(script, expanded)
|
Adds arguments to parser.
def _add_arguments(self):
"""Adds arguments to parser."""
self._parser.add_argument(
'-v', '--version',
action='store_true',
help="show program's version number and exit")
self._parser.add_argument(
'-a', '--alias',
nargs='?',
const=get_alias(),
help='[custom-alias-name] prints alias for current shell')
self._parser.add_argument(
'-l', '--shell-logger',
action='store',
help='log shell output to the file')
self._parser.add_argument(
'--enable-experimental-instant-mode',
action='store_true',
        help='enable experimental instant mode, use at your own risk')
self._parser.add_argument(
'-h', '--help',
action='store_true',
help='show this help message and exit')
self._add_conflicting_arguments()
self._parser.add_argument(
'-d', '--debug',
action='store_true',
help='enable debug output')
self._parser.add_argument(
'--force-command',
action='store',
help=SUPPRESS)
self._parser.add_argument(
'command',
nargs='*',
help='command that should be fixed')
|
It's too dangerous to use `-y` and `-r` together.
def _add_conflicting_arguments(self):
"""It's too dangerous to use `-y` and `-r` together."""
group = self._parser.add_mutually_exclusive_group()
group.add_argument(
'-y', '--yes', '--yeah',
action='store_true',
help='execute fixed command without confirmation')
group.add_argument(
'-r', '--repeat',
action='store_true',
help='repeat on failure')
|
Prepares arguments by:
- removing the placeholder and moving the arguments after it to the
beginning, so we can distinguish our own arguments from the `command`'s;
- adding `--` before `command`, so our parser ignores the `command`'s
arguments.
def _prepare_arguments(self, argv):
"""Prepares arguments by:
- removing placeholder and moving arguments after it to beginning,
we need this to distinguish arguments from `command` with ours;
- adding `--` before `command`, so our parse would ignore arguments
of `command`.
"""
if ARGUMENT_PLACEHOLDER in argv:
index = argv.index(ARGUMENT_PLACEHOLDER)
return argv[index + 1:] + ['--'] + argv[:index]
elif argv and not argv[0].startswith('-') and argv[0] != '--':
return ['--'] + argv
else:
return argv
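A concrete illustration, where ARGUMENT_PLACEHOLDER is thefuck's internal placeholder constant:
# argv as received: the command's args, the placeholder, then our own flags
argv = ['git', 'brnch', ARGUMENT_PLACEHOLDER, '-y']
# _prepare_arguments(argv) -> ['-y', '--', 'git', 'brnch']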
|
Get custom npm scripts.
def get_scripts():
"""Get custom npm scripts."""
proc = Popen(['npm', 'run-script'], stdout=PIPE)
should_yield = False
for line in proc.stdout.readlines():
line = line.decode()
if 'available via `npm run-script`:' in line:
should_yield = True
continue
if should_yield and re.match(r'^ [^ ]+', line):
yield line.strip().split(' ')[0]
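To make the state machine concrete, here is its behavior on sample output. The indentation of `npm run-script` output varies across npm versions, so the sample below is shaped to match the regex above rather than copied from a real run:
import re

sample = [
    'available via `npm run-script`:',
    ' build',
    '   webpack --mode production',
]
should_yield = False
for line in sample:
    if 'available via `npm run-script`:' in line:
        should_yield = True
        continue
    if should_yield and re.match(r'^ [^ ]+', line):
        print(line.strip().split(' ')[0])  # -> build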
|
Fills `settings` with values from `settings.py` and env.
def init(self, args=None):
"""Fills `settings` with values from `settings.py` and env."""
from .logs import exception
self._setup_user_dir()
self._init_settings_file()
try:
self.update(self._settings_from_file())
except Exception:
exception("Can't load settings from file", sys.exc_info())
try:
self.update(self._settings_from_env())
except Exception:
exception("Can't load settings from env", sys.exc_info())
self.update(self._settings_from_args(args))
|
Returns a Path object representing the user config resource.
def _get_user_dir_path(self):
"""Returns a Path object representing the user config resource."""
xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '~/.config')
user_dir = Path(xdg_config_home, 'thefuck').expanduser()
legacy_user_dir = Path('~', '.thefuck').expanduser()
# For backward compatibility use legacy '~/.thefuck' if it exists:
if legacy_user_dir.is_dir():
warn(u'Config path {} is deprecated. Please move to {}'.format(
legacy_user_dir, user_dir))
return legacy_user_dir
else:
return user_dir
|
Returns the user config dir, creating it if it doesn't exist.
def _setup_user_dir(self):
"""Returns the user config dir, creating it if it doesn't exist."""
user_dir = self._get_user_dir_path()
rules_dir = user_dir.joinpath('rules')
if not rules_dir.is_dir():
rules_dir.mkdir(parents=True)
self.user_dir = user_dir
|
Loads settings from file.
def _settings_from_file(self):
"""Loads settings from file."""
settings = load_source(
'settings', text_type(self.user_dir.joinpath('settings.py')))
return {key: getattr(settings, key)
for key in const.DEFAULT_SETTINGS.keys()
if hasattr(settings, key)}
|
Transforms rules list from env-string to python.
def _rules_from_env(self, val):
"""Transforms rules list from env-string to python."""
val = val.split(':')
if 'DEFAULT_RULES' in val:
val = const.DEFAULT_RULES + [rule for rule in val if rule != 'DEFAULT_RULES']
return val
|
Gets priority pairs from env.
def _priority_from_env(self, val):
"""Gets priority pairs from env."""
for part in val.split(':'):
try:
rule, priority = part.split('=')
yield rule, int(priority)
except ValueError:
continue
|
Transforms env-strings to python.
def _val_from_env(self, env, attr):
"""Transforms env-strings to python."""
val = os.environ[env]
if attr in ('rules', 'exclude_rules'):
return self._rules_from_env(val)
elif attr == 'priority':
return dict(self._priority_from_env(val))
elif attr in ('wait_command', 'history_limit', 'wait_slow_command',
'num_close_matches'):
return int(val)
elif attr in ('require_confirmation', 'no_colors', 'debug',
'alter_history', 'instant_mode'):
return val.lower() == 'true'
elif attr == 'slow_commands':
return val.split(':')
else:
return val
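A self-contained restatement of a few of the conversions above, so the string-to-Python mapping is easy to check (the attribute names mirror the branches above):
def val_from_env_demo(attr, val):
    if attr in ('wait_command', 'history_limit'):
        return int(val)
    if attr in ('require_confirmation', 'no_colors'):
        return val.lower() == 'true'
    if attr == 'slow_commands':
        return val.split(':')
    return val

assert val_from_env_demo('wait_command', '10') == 10
assert val_from_env_demo('no_colors', 'TRUE') is True
assert val_from_env_demo('slow_commands', 'lein:gradle') == ['lein', 'gradle']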
|
Loads settings from env.
def _settings_from_env(self):
"""Loads settings from env."""
return {attr: self._val_from_env(env, attr)
for env, attr in const.ENV_TO_ATTR.items()
if env in os.environ}
|
Loads settings from args.
def _settings_from_args(self, args):
"""Loads settings from args."""
if not args:
return {}
from_args = {}
if args.yes:
from_args['require_confirmation'] = not args.yes
if args.debug:
from_args['debug'] = args.debug
if args.repeat:
from_args['repeat'] = args.repeat
return from_args
|
When the argument order is wrong, the first argument that exists on disk is the destination.
def _get_destination(script_parts):
"""When the argument order is wrong, the first argument that exists on disk is the destination."""
for part in script_parts:
if part not in {'ln', '-s', '--symbolic'} and os.path.exists(part):
return part
|
Removes sudo before calling fn and adds it after.
def sudo_support(fn, command):
"""Removes sudo before calling fn and adds it after."""
if not command.script.startswith('sudo '):
return fn(command)
result = fn(command.update(script=command.script[5:]))
if result and isinstance(result, six.string_types):
return u'sudo {}'.format(result)
elif isinstance(result, list):
return [u'sudo {}'.format(x) for x in result]
else:
return result
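A quick illustration of the wrapper above; `Command` here is a minimal stand-in with only the fields `sudo_support` touches (in the project it is applied as a decorator via the `decorator` package rather than called directly):
from collections import namedtuple

class Command(namedtuple('Command', ('script', 'output'))):
    def update(self, script):
        return self._replace(script=script)

def fix(command):
    return command.script.replace('aptget', 'apt-get')

print(sudo_support(fix, Command('sudo aptget install vim', '')))
# -> 'sudo apt-get install vim'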
|
Tries to kill the process; otherwise just logs a debug message (the
process will be killed when thefuck terminates anyway).
:type proc: Process
def _kill_process(proc):
"""Tries to kill the process; otherwise just logs a debug message (the
process will be killed when thefuck terminates anyway).
:type proc: Process
"""
try:
proc.kill()
except AccessDenied:
logs.debug(u'Rerun: process PID {} ({}) could not be terminated'.format(
proc.pid, proc.exe()))
|
Returns `True` if we can get the output of the command within
`settings.wait_command` seconds.
The command is killed if it doesn't finish in time.
:type popen: Popen
:rtype: bool
def _wait_output(popen, is_slow):
"""Returns `True` if we can get the output of the command within
`settings.wait_command` seconds.
The command is killed if it doesn't finish in time.
:type popen: Popen
:rtype: bool
"""
proc = Process(popen.pid)
try:
proc.wait(settings.wait_slow_command if is_slow
else settings.wait_command)
return True
except TimeoutExpired:
for child in proc.children(recursive=True):
_kill_process(child)
_kill_process(proc)
return False
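A standalone sketch of the same wait-then-kill pattern, using only documented psutil APIs; the `sleep 60` subprocess is a stand-in for a slow command (Unix-only example):
from subprocess import Popen
from psutil import Process, TimeoutExpired

popen = Popen(['sleep', '60'])
proc = Process(popen.pid)
try:
    proc.wait(1)  # seconds, playing the role of settings.wait_command
    print('finished in time')
except TimeoutExpired:
    for child in proc.children(recursive=True):
        child.kill()
    proc.kill()
    print('killed after timeout')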
|
Runs the script and obtains its stdout/stderr.
:type script: str
:type expanded: str
:rtype: str | None
def get_output(script, expanded):
"""Runs the script and obtains its stdout/stderr.
:type script: str
:type expanded: str
:rtype: str | None
"""
env = dict(os.environ)
env.update(settings.env)
split_expanded = shlex.split(expanded)
is_slow = split_expanded[0] in settings.slow_commands if split_expanded else False
with logs.debug_time(u'Call: {}; with env: {}; is slow: {}'.format(
script, env, is_slow)):
result = Popen(expanded, shell=True, stdin=PIPE,
stdout=PIPE, stderr=STDOUT, env=env)
if _wait_output(result, is_slow):
output = result.stdout.read().decode('utf-8')
logs.debug(u'Received output: {}'.format(output))
return output
else:
logs.debug(u'Execution timed out!')
return None
|
Reads script output from log.
:type script: str
:rtype: str | None
def get_output(script):
"""Reads script output from log.
:type script: str
:rtype: str | None
"""
if six.PY2:
logs.warn('Experimental instant mode is Python 3+ only')
return None
if 'THEFUCK_OUTPUT_LOG' not in os.environ:
logs.warn("Output log isn't specified")
return None
if const.USER_COMMAND_MARK not in os.environ.get('PS1', ''):
logs.warn(
"PS1 doesn't contain user command mark, please ensure "
"that PS1 is not changed after The Fuck alias initialization")
return None
try:
with logs.debug_time(u'Read output from log'):
fd = os.open(os.environ['THEFUCK_OUTPUT_LOG'], os.O_RDONLY)
buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_READ)
_skip_old_lines(buffer)
lines = _get_output_lines(script, buffer)
output = '\n'.join(lines).strip()
logs.debug(u'Received output: {}'.format(output))
return output
except OSError:
logs.warn("Can't read output log")
return None
except ScriptNotInLog:
logs.warn("Script not found in output log")
return None
|
Gets the packages that provide the given command using `pkgfile`.
If the command is of the form `sudo foo`, searches for the `foo` command
instead.
def get_pkgfile(command):
""" Gets the packages that provide the given command using `pkgfile`.
If the command is of the form `sudo foo`, searches for the `foo` command
instead.
"""
try:
command = command.strip()
if command.startswith('sudo '):
command = command[5:]
command = command.split(" ")[0]
packages = subprocess.check_output(
['pkgfile', '-b', '-v', command],
universal_newlines=True, stderr=utils.DEVNULL
).splitlines()
return [package.split()[0] for package in packages]
except subprocess.CalledProcessError as err:
if err.returncode == 1 and err.output == "":
return []
else:
raise err
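To show what the parsing step extracts, here are pkgfile-like sample lines; the exact `pkgfile -b -v` output format depends on the pkgfile version, so treat the sample as an assumption:
sample = [
    'extra/vim 8.1.0-1\t/usr/bin/vim',
    'extra/gvim 8.1.0-1\t/usr/bin/vim',
]
print([line.split()[0] for line in sample])
# ['extra/vim', 'extra/gvim']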
|
Returns a list of the child directories of the given parent directory
def _get_sub_dirs(parent):
"""Returns a list of the child directories of the given parent directory"""
return [child for child in os.listdir(parent) if os.path.isdir(os.path.join(parent, child))]
|
Attempts to rebuild the path string by spell-checking the directories.
If that fails (i.e. no directory is a close enough match), it falls back
to the cd_mkdir rule.
Sensitivity is controlled by MAX_ALLOWED_DIFF (default 0.6).
def get_new_command(command):
"""
Attempts to rebuild the path string by spell-checking the directories.
If that fails (i.e. no directory is a close enough match), it falls back
to the cd_mkdir rule.
Sensitivity is controlled by MAX_ALLOWED_DIFF (default 0.6).
"""
dest = command.script_parts[1].split(os.sep)
if dest[-1] == '':
dest = dest[:-1]
if dest[0] == '':
cwd = os.sep
dest = dest[1:]
elif six.PY2:
cwd = os.getcwdu()
else:
cwd = os.getcwd()
for directory in dest:
if directory == ".":
continue
elif directory == "..":
cwd = os.path.split(cwd)[0]
continue
best_matches = get_close_matches(directory, _get_sub_dirs(cwd), cutoff=MAX_ALLOWED_DIFF)
if best_matches:
cwd = os.path.join(cwd, best_matches[0])
else:
return cd_mkdir.get_new_command(command)
return u'cd "{0}"'.format(cwd)
|
Returns new command with replaced fields.
:rtype: Command
def update(self, **kwargs):
"""Returns new command with replaced fields.
:rtype: Command
"""
kwargs.setdefault('script', self.script)
kwargs.setdefault('output', self.output)
return Command(**kwargs)
|
Creates instance of `Command` from a list of script parts.
:type raw_script: [basestring]
:rtype: Command
:raises: EmptyCommand
def from_raw_script(cls, raw_script):
"""Creates instance of `Command` from a list of script parts.
:type raw_script: [basestring]
:rtype: Command
:raises: EmptyCommand
"""
script = format_raw_script(raw_script)
if not script:
raise EmptyCommand
expanded = shell.from_shell(script)
output = get_output(script, expanded)
return cls(expanded, output)
|
Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
def from_path(cls, path):
"""Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
"""
name = path.name[:-3]
with logs.debug_time(u'Importing rule: {};'.format(name)):
rule_module = load_source(name, str(path))
priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
return cls(name, rule_module.match,
rule_module.get_new_command,
getattr(rule_module, 'enabled_by_default', True),
getattr(rule_module, 'side_effect', None),
settings.priority.get(name, priority),
getattr(rule_module, 'requires_output', True))
|
Returns `True` when rule enabled.
:rtype: bool
def is_enabled(self):
"""Returns `True` when rule enabled.
:rtype: bool
"""
if self.name in settings.exclude_rules:
return False
elif self.name in settings.rules:
return True
elif self.enabled_by_default and ALL_ENABLED in settings.rules:
return True
else:
return False
|
Returns `True` if rule matches the command.
:type command: Command
:rtype: bool
def is_match(self, command):
"""Returns `True` if rule matches the command.
:type command: Command
:rtype: bool
"""
if command.output is None and self.requires_output:
return False
try:
with logs.debug_time(u'Trying rule: {};'.format(self.name)):
if self.match(command):
return True
except Exception:
logs.rule_failed(self, sys.exc_info())
return False
|
Returns generator with corrected commands.
:type command: Command
:rtype: Iterable[CorrectedCommand]
def get_corrected_commands(self, command):
"""Returns generator with corrected commands.
:type command: Command
:rtype: Iterable[CorrectedCommand]
"""
new_commands = self.get_new_command(command)
if not isinstance(new_commands, list):
new_commands = (new_commands,)
for n, new_command in enumerate(new_commands):
yield CorrectedCommand(script=new_command,
side_effect=self.side_effect,
priority=(n + 1) * self.priority)
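A small numeric illustration of the priority assignment above: the first candidate keeps the rule's base priority and later candidates are pushed down (the selector prefers lower values):
new_commands = ['git branch', 'git blame']
priority = 1000  # hypothetical rule priority
print([(cmd, (n + 1) * priority) for n, cmd in enumerate(new_commands)])
# [('git branch', 1000), ('git blame', 2000)]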
|
Returns the fixed command's script.
If `settings.repeat` is `True`, appends a second attempt to run fuck
in case the fixed command fails again.
def _get_script(self):
"""Returns the fixed command's script.
If `settings.repeat` is `True`, appends a second attempt to run fuck
in case the fixed command fails again.
"""
if settings.repeat:
repeat_fuck = '{} --repeat {}--force-command {}'.format(
get_alias(),
'--debug ' if settings.debug else '',
shell.quote(self.script))
return shell.or_(self.script, repeat_fuck)
else:
return self.script
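A concrete rendering of the fallback, assuming a bash-style `fuck` alias and `shell.or_` joining with `||` (both shell-dependent assumptions):
def or_(*commands):  # bash-style stand-in for shell.or_
    return u' || '.join(commands)

repeat_fuck = '{} --repeat {}--force-command {}'.format(
    'fuck', '', "'git branch'")
print(or_('git branch', repeat_fuck))
# git branch || fuck --repeat --force-command 'git branch'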
|
Runs command from rule for passed command.
:type old_cmd: Command
def run(self, old_cmd):
"""Runs command from rule for passed command.
:type old_cmd: Command
"""
if self.side_effect:
self.side_effect(old_cmd, self.script)
if settings.alter_history:
shell.put_to_history(self.script)
# This depends on correct setting of PYTHONIOENCODING by the alias:
logs.debug(u'PYTHONIOENCODING: {}'.format(
os.environ.get('PYTHONIOENCODING', '!!not-set!!')))
print(self._get_script())
|
Returns parent process pid.
def _get_shell_pid():
"""Returns parent process pid."""
proc = Process(os.getpid())
try:
return proc.parent().pid
except TypeError:
return proc.parent.pid
|
Records shell pid to tracker file.
def _record_first_run():
"""Records shell pid to tracker file."""
info = {'pid': _get_shell_pid(),
'time': time.time()}
mode = 'wb' if six.PY2 else 'w'
with _get_not_configured_usage_tracker_path().open(mode) as tracker:
json.dump(info, tracker)
|
Returns `True` when we know that `fuck` was called a second time.
def _is_second_run():
"""Returns `True` when we know that `fuck` was called a second time."""
tracker_path = _get_not_configured_usage_tracker_path()
if not tracker_path.exists():
return False
current_pid = _get_shell_pid()
with tracker_path.open('r') as tracker:
try:
info = json.load(tracker)
except ValueError:
return False
if not (isinstance(info, dict) and info.get('pid') == current_pid):
return False
return (_get_previous_command() == 'fuck' or
time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT)
|
Returns `True` when the alias is already in the shell config.
def _is_already_configured(configuration_details):
"""Returns `True` when the alias is already in the shell config."""
path = Path(configuration_details.path).expanduser()
with path.open('r') as shell_config:
return configuration_details.content in shell_config.read()
|
Adds alias to shell config.
def _configure(configuration_details):
"""Adds alias to shell config."""
path = Path(configuration_details.path).expanduser()
with path.open('a') as shell_config:
shell_config.write(u'\n')
shell_config.write(configuration_details.content)
shell_config.write(u'\n')
|
Shows useful information about how to configure the alias on the first run
and configures it automatically on the second.
It's only shown when the user types `fuck` and the alias isn't configured.
def main():
"""Shows useful information about how to configure the alias on the first run
and configures it automatically on the second.
It's only shown when the user types `fuck` and the alias isn't configured.
"""
settings.init()
configuration_details = shell.how_to_configure()
if (
configuration_details and
configuration_details.can_configure_automatically
):
if _is_already_configured(configuration_details):
logs.already_configured(configuration_details)
return
elif _is_second_run():
_configure(configuration_details)
logs.configured_successfully(configuration_details)
return
else:
_record_first_run()
logs.how_to_configure_alias(configuration_details)
|
Caches previous calls to the function.
def memoize(fn):
"""Caches previous calls to the function."""
memo = {}
@wraps(fn)
def wrapper(*args, **kwargs):
if not memoize.disabled:
key = pickle.dumps((args, kwargs))
if key not in memo:
memo[key] = fn(*args, **kwargs)
value = memo[key]
else:
# Memoize is disabled, call the function
value = fn(*args, **kwargs)
return value
return wrapper
# `memoize.disabled` must exist before the first call; the project sets it
# at module level:
memoize.disabled = False
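Usage sketch for the decorator above:
@memoize
def slow_square(x):
    print('computing', x)
    return x * x

slow_square(4)  # prints 'computing 4' and caches 16
slow_square(4)  # served from the cache, nothing printed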
|
Adds default values to settings if they are not present.
Usage:
@default_settings({'apt': '/usr/bin/apt'})
def match(command):
print(settings.apt)
def default_settings(params):
"""Adds default values to settings if it not presented.
Usage:
@default_settings({'apt': '/usr/bin/apt'})
def match(command):
print(settings.apt)
"""
def _default_settings(fn, command):
for k, w in params.items():
settings.setdefault(k, w)
return fn(command)
return decorator(_default_settings)
|
Returns the closest match, or the first of the possibilities as a fallback.
def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True):
"""Returns the closest match, or the first of the possibilities as a fallback."""
possibilities = list(possibilities)
try:
return difflib_get_close_matches(word, possibilities, 1, cutoff)[0]
except IndexError:
if fallback_to_first:
return possibilities[0]
|
Overrides `difflib.get_close_matches` to control the `n` argument.
def get_close_matches(word, possibilities, n=None, cutoff=0.6):
"""Overrides `difflib.get_close_matches` to control the `n` argument."""
if n is None:
n = settings.num_close_matches
return difflib_get_close_matches(word, possibilities, n, cutoff)
|
Replaces command line argument.
def replace_argument(script, from_, to):
"""Replaces command line argument."""
replaced_in_the_end = re.sub(u' {}$'.format(re.escape(from_)), u' {}'.format(to),
script, count=1)
if replaced_in_the_end != script:
return replaced_in_the_end
else:
return script.replace(
u' {} '.format(from_), u' {} '.format(to), 1)
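The two branches above, illustrated:
print(replace_argument('git brnch', 'brnch', 'branch'))
# 'git branch'      (matched at the end of the script)
print(replace_argument('git brnch -v', 'brnch', 'branch'))
# 'git branch -v'   (matched mid-script, padded with spaces)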
|
Helper for *_no_command rules.
def replace_command(command, broken, matched):
"""Helper for *_no_command rules."""
new_cmds = get_close_matches(broken, matched, cutoff=0.1)
return [replace_argument(command.script, broken, new_cmd.strip())
for new_cmd in new_cmds]
|
Returns `True` if the command is a call to one of the passed app names.
def is_app(command, *app_names, **kwargs):
"""Returns `True` if the command is a call to one of the passed app names."""
at_least = kwargs.pop('at_least', 0)
if kwargs:
raise TypeError("got an unexpected keyword argument '{}'".format(
', '.join(kwargs.keys())))
if len(command.script_parts) > at_least:
return command.script_parts[0] in app_names
return False
|
Specifies that the matching script is for one of the app names.
def for_app(*app_names, **kwargs):
"""Specifies that the matching script is for one of the app names."""
def _for_app(fn, command):
if is_app(command, *app_names, **kwargs):
return fn(command)
else:
return False
return decorator(_for_app)
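A usage sketch for the decorator above; thanks to the `decorator` package, the wrapped `match` keeps its one-argument signature:
@for_app('git', 'hub')
def match(command):
    # Only evaluated when command.script_parts[0] is 'git' or 'hub'.
    return 'usage:' in command.output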
|
Caches the function result in a temporary file.
The cache expires when the modification date of any file in `depends_on`
changes.
Only functions should be wrapped in `cache`, not methods.
def cache(*depends_on):
"""Caches the function result in a temporary file.
The cache expires when the modification date of any file in `depends_on`
changes.
Only functions should be wrapped in `cache`, not methods.
"""
def cache_decorator(fn):
@memoize
@wraps(fn)
def wrapper(*args, **kwargs):
if cache.disabled:
return fn(*args, **kwargs)
else:
return _cache.get_value(fn, depends_on, args, kwargs)
return wrapper
return cache_decorator
# Like `memoize.disabled`, the flag is defined at module level:
cache.disabled = False
|
Creates single script from a list of script parts.
:type raw_script: [basestring]
:rtype: basestring
def format_raw_script(raw_script):
"""Creates single script from a list of script parts.
:type raw_script: [basestring]
:rtype: basestring
"""
if six.PY2:
script = ' '.join(arg.decode('utf-8') for arg in raw_script)
else:
script = ' '.join(raw_script)
return script.strip()
|
Decides actions given observation information, and takes them in the environment.
:param brain_info: A dictionary of brain names and BrainInfo from the environment.
:return: an ActionInfo containing the action, memories, values and an object
to be passed to add_experiences
def get_action(self, brain_info: BrainInfo) -> ActionInfo:
"""
Decides actions given observation information, and takes them in the environment.
:param brain_info: A dictionary of brain names and BrainInfo from the environment.
:return: an ActionInfo containing the action, memories, values and an object
to be passed to add_experiences
"""
if len(brain_info.agents) == 0:
return ActionInfo([], [], [], None, None)
run_out = self.evaluate(brain_info)
return ActionInfo(
action=run_out.get('action'),
memory=run_out.get('memory_out'),
text=None,
value=run_out.get('value'),
outputs=run_out
)
|
Executes model.
:param feed_dict: Input dictionary mapping nodes to input data.
:param out_dict: Output dictionary mapping names to nodes.
:return: Dictionary mapping names to output data.
def _execute_model(self, feed_dict, out_dict):
"""
Executes model.
:param feed_dict: Input dictionary mapping nodes to input data.
:param out_dict: Output dictionary mapping names to nodes.
:return: Dictionary mapping names to output data.
"""
network_out = self.sess.run(list(out_dict.values()), feed_dict=feed_dict)
run_out = dict(zip(list(out_dict.keys()), network_out))
return run_out
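A minimal TF1-style sketch of the fetch/feed pattern `_execute_model` wraps; the graph is a stand-in, not the actual policy network, and the compat shim is used so the snippet also runs under TF 2.x:
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, [None, 2], name='x')
y = tf.reduce_sum(x, axis=1, name='y')

with tf.Session() as sess:
    out_dict = {'sum': y}              # names -> graph nodes
    feed_dict = {x: [[1.0, 2.0]]}      # nodes -> input data
    network_out = sess.run(list(out_dict.values()), feed_dict=feed_dict)
    run_out = dict(zip(out_dict.keys(), network_out))
    print(run_out)  # {'sum': array([3.], dtype=float32)}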
|