Return a dictionary for updating `last_metrics` with `mets`. def add_metrics(last_metrics:Collection[Rank0Tensor], mets:Union[Rank0Tensor, Collection[Rank0Tensor]]): "Return a dictionary for updating `last_metrics` with `mets`." last_metrics,mets = listify(last_metrics),listify(mets) return {'last_metrics': last_metrics + mets}
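A minimal sketch of how `add_metrics` is typically used: a hypothetical callback (`SumOfLosses` below, assuming the usual fastai v1 imports expose `Callback` and `add_metrics`) accumulates a value over the epoch and returns the dict from `on_epoch_end` so the recorder appends it to `last_metrics`.
from fastai.basics import *   # assumption: fastai v1 namespace providing Callback and add_metrics

class SumOfLosses(Callback):
    "Hypothetical callback: reports the summed batch loss as an extra metric via add_metrics."
    def on_epoch_begin(self, **kwargs): self.total = 0.
    def on_batch_end(self, last_loss, **kwargs): self.total += last_loss.detach().item()
    def on_epoch_end(self, last_metrics, **kwargs):
        # returning this dict makes fastai append our value to last_metrics for the epoch
        return add_metrics(last_metrics, self.total)

# usage sketch: learn.fit(1, callbacks=[SumOfLosses()])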
Collects iterables lazily, rather than immediately. Docstring same as parent: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor Implementation taken from this PR: https://github.com/python/cpython/pull/707 def map(self, fn, *iterables, timeout=None, chunksize=1, prefetch=None): """ Collects iterables lazily, rather than immediately. Docstring same as parent: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor Implementation taken from this PR: https://github.com/python/cpython/pull/707 """ if timeout is not None: end_time = timeout + time.time() if prefetch is None: prefetch = self._max_workers if prefetch < 0: raise ValueError("prefetch count may not be negative") argsiter = zip(*iterables) fs = collections.deque(self.submit(fn, *args) for args in itertools.islice(argsiter, self._max_workers+prefetch)) # Yield must be hidden in closure so that the futures are submitted before the first iterator value is required. def result_iterator(): nonlocal argsiter try: while fs: res = fs[0].result() if timeout is None else fs[0].result(end_time-time.time()) # Got a result, future needn't be cancelled del fs[0] # Dispatch next task before yielding to keep pipeline full if argsiter: try: args = next(argsiter) except StopIteration: argsiter = None else: fs.append(self.submit(fn, *args)) yield res finally: for future in fs: future.cancel() return result_iterator()
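A usage sketch of the lazy `map` (the executor subclass name `LazyProcessPoolExecutor` is hypothetical; any ProcessPoolExecutor subclass carrying the method above would do). Unlike the stock `Executor.map`, only `max_workers + prefetch` tasks are ever in flight, so even an infinite input iterable is safe.
import itertools

def slow_square(x):           # must be a module-level function so it can be pickled
    return x * x

with LazyProcessPoolExecutor(max_workers=4) as ex:   # hypothetical subclass defining the lazy map above
    results = ex.map(slow_square, itertools.count(), prefetch=8)
    first_ten = [next(results) for _ in range(10)]   # pulls results lazily; at most 12 tasks in flight
print(first_ten)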
Generate documentation for fastai library in HTML (asciidoctor required) :param str src: The absolute/relative path of source file/dir def gen_ascii_docs(src='fastai'): """Generate documentation for fastai library in HTML (asciidoctor required) :param str src: The absolute/relative path of source file/dir """ os.chdir(Path(__file__).absolute().parent) with working_directory('..'): path = Path(src) if path.is_dir(): file_paths = list(path.glob('**/*.py')) else: file_paths = [path] pat = re.compile(r'^(?!__init__).*\.py\Z') for file_path in file_paths: if pat.match(file_path.name): file_path.parent.mkdir(parents=True, exist_ok=True) with working_directory('..'): tmpl_str = parse_module(file_path) (file_path.parent/(file_path.name.rsplit('.',1)[0] + '.adoc.tmpl')).write_text(tmpl_str) (file_path.parent/(file_path.name.rsplit('.',1)[0] + '.adoc')).write_text(re.sub(r"{{(.*?)}}", parse_tmpl, tmpl_str, flags=re.DOTALL)) if path.is_dir(): subprocess.call(['asciidoctor', str(path) + '/**/*.adoc']) else: subprocess.call(['asciidoctor', str(path).rsplit('.',1)[0] + '.adoc'])
Retrieves new batch of DatasetType, and detaches it. def _get_new_batch(self, ds_type:DatasetType)->Collection[Tensor]: "Retrieves new batch of DatasetType, and detaches it." return self.learn.data.one_batch(ds_type=ds_type, detach=True, denorm=False, cpu=False)
The one_batch function is extremely slow with large datasets, so cache the result as an optimization. def _update_batches_if_needed(self)->None: "The one_batch function is extremely slow with large datasets, so cache the result as an optimization." if self.learn.data.valid_dl is None: return # Running learning rate finder, so return update_batches = self.data is not self.learn.data if not update_batches: return self.data = self.learn.data self.trn_batch = self._get_new_batch(ds_type=DatasetType.Train) self.val_batch = self._get_new_batch(ds_type=DatasetType.Valid)
Writes gradient statistics to Tensorboard. def _write_model_stats(self, iteration:int)->None: "Writes gradient statistics to Tensorboard." self.stats_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
Writes training loss to Tensorboard. def _write_training_loss(self, iteration:int, last_loss:Tensor)->None: "Writes training loss to Tensorboard." scalar_value = to_np(last_loss) tag = self.metrics_root + 'train_loss' self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
Writes model weight histograms to Tensorboard. def _write_weight_histograms(self, iteration:int)->None: "Writes model weight histograms to Tensorboard." self.hist_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
Writes single scalar value to Tensorboard. def _write_scalar(self, name:str, scalar_value, iteration:int)->None: "Writes single scalar value to Tensorboard." tag = self.metrics_root + name self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
Writes training metrics to Tensorboard. def _write_metrics(self, iteration:int, last_metrics:MetricsList, start_idx:int=2)->None: "Writes training metrics to Tensorboard." recorder = self.learn.recorder for i, name in enumerate(recorder.names[start_idx:]): if last_metrics is None or len(last_metrics) < i+1: return scalar_value = last_metrics[i] self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration)
Callback function that writes batch end appropriate data to Tensorboard. def on_batch_end(self, last_loss:Tensor, iteration:int, **kwargs)->None: "Callback function that writes batch end appropriate data to Tensorboard." if iteration == 0: return self._update_batches_if_needed() if iteration % self.loss_iters == 0: self._write_training_loss(iteration=iteration, last_loss=last_loss) if iteration % self.hist_iters == 0: self._write_weight_histograms(iteration=iteration)
Callback function that writes backward end appropriate data to Tensorboard. def on_backward_end(self, iteration:int, **kwargs)->None: "Callback function that writes backward end appropriate data to Tensorboard." if iteration == 0: return self._update_batches_if_needed() if iteration % self.stats_iters == 0: self._write_model_stats(iteration=iteration)
Callback function that writes epoch end appropriate data to Tensorboard. def on_epoch_end(self, last_metrics:MetricsList, iteration:int, **kwargs)->None: "Callback function that writes epoch end appropriate data to Tensorboard." self._write_metrics(iteration=iteration, last_metrics=last_metrics)
Writes model weight histograms to Tensorboard. def _write_weight_histograms(self, iteration:int)->None: "Writes model weight histograms to Tensorboard." generator, critic = self.learn.gan_trainer.generator, self.learn.gan_trainer.critic self.hist_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='generator') self.hist_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='critic')
Writes gradient statistics for generator to Tensorboard. def _write_gen_model_stats(self, iteration:int)->None: "Writes gradient statistics for generator to Tensorboard." generator = self.learn.gan_trainer.generator self.stats_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='gen_model_stats') self.gen_stats_updated = True
Writes gradient statistics for critic to Tensorboard. def _write_critic_model_stats(self, iteration:int)->None: "Writes gradient statistics for critic to Tensorboard." critic = self.learn.gan_trainer.critic self.stats_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='crit_model_stats') self.crit_stats_updated = True
Writes gradient statistics to Tensorboard. def _write_model_stats(self, iteration:int)->None: "Writes gradient statistics to Tensorboard." # We don't want to write stats when model is not iterated on and hence has zeroed out gradients gen_mode = self.learn.gan_trainer.gen_mode if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration) if not gen_mode and not self.crit_stats_updated: self._write_critic_model_stats(iteration=iteration)
Writes training loss to Tensorboard. def _write_training_loss(self, iteration:int, last_loss:Tensor)->None: "Writes training loss to Tensorboard." recorder = self.learn.gan_trainer.recorder if len(recorder.losses) == 0: return scalar_value = to_np((recorder.losses[-1:])[0]) tag = self.metrics_root + 'train_loss' self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
Writes model generated, original and real images to Tensorboard. def _write_images(self, iteration:int)->None: "Writes model generated, original and real images to Tensorboard." trainer = self.learn.gan_trainer #TODO: Switching gen_mode temporarily seems a bit hacky here. Certainly not a good side-effect. Is there a better way? gen_mode = trainer.gen_mode try: trainer.switch(gen_mode=True) self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch, iteration=iteration, tbwriter=self.tbwriter) finally: trainer.switch(gen_mode=gen_mode)
Callback function that writes batch end appropriate data to Tensorboard. def on_batch_end(self, iteration:int, **kwargs)->None: "Callback function that writes batch end appropriate data to Tensorboard." super().on_batch_end(iteration=iteration, **kwargs) if iteration == 0: return if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
Callback function that writes backward end appropriate data to Tensorboard. def on_backward_end(self, iteration:int, **kwargs)->None: "Callback function that writes backward end appropriate data to Tensorboard." if iteration == 0: return self._update_batches_if_needed() #TODO: This could perhaps be implemented as queues of requests instead but that seemed like overkill. # But I'm not the biggest fan of maintaining these boolean flags either... Review pls. if iteration % self.stats_iters == 0: self.gen_stats_updated, self.crit_stats_updated = False, False if not (self.gen_stats_updated and self.crit_stats_updated): self._write_model_stats(iteration=iteration)
Writes model generated, original and real images to Tensorboard. def _write_images(self, iteration:int)->None: "Writes model generated, original and real images to Tensorboard." self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch, iteration=iteration, tbwriter=self.tbwriter)
Queues up an asynchronous write request to Tensorboard. def request_write(self, request: TBWriteRequest)->None: "Queues up an asynchronous write request to Tensorboard." if self.stop_request.isSet(): return self.queue.put(request)
Processes queued up write requests asynchronously to Tensorboard. def _queue_processor(self)->None: "Processes queued up write requests asynchronously to Tensorboard." while not self.stop_request.isSet(): while not self.queue.empty(): if self.stop_request.isSet(): return request = self.queue.get() request.write() sleep(0.2)
Factory method to convert a batch of model images to a list of ModelImageSet. def get_list_from_model(learn:Learner, ds_type:DatasetType, batch:Tuple)->[]: "Factory method to convert a batch of model images to a list of ModelImageSet." image_sets = [] x,y = batch[0],batch[1] preds = learn.pred_batch(ds_type=ds_type, batch=(x,y), reconstruct=True) for orig_px, real_px, gen in zip(x,y,preds): orig, real = Image(px=orig_px), Image(px=real_px) image_set = ModelImageSet(orig=orig, real=real, gen=gen) image_sets.append(image_set) return image_sets
Writes single model histogram to Tensorboard. def _write_histogram(self, param_name:str, values)->None: "Writes single model histogram to Tensorboard." tag = self.name + '/weights/' + param_name self.tbwriter.add_histogram(tag=tag, values=values, global_step=self.iteration)
Writes model histograms to Tensorboard. def write(self)->None: "Writes model histograms to Tensorboard." for param_name, values in self.params: self._write_histogram(param_name=param_name, values=values)
Writes model histograms to Tensorboard. def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model')->None: "Writes model histograms to Tensorboard." request = HistogramTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name) asyncTBWriter.request_write(request)
Writes a single scalar value for a gradient statistic to Tensorboard. def _add_gradient_scalar(self, name:str, scalar_value)->None: "Writes a single scalar value for a gradient statistic to Tensorboard." tag = self.name + '/gradients/' + name self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration)
Writes the average norm of the gradients to Tensorboard. def _write_avg_norm(self, norms:[])->None: "Writes the average norm of the gradients to Tensorboard." avg_norm = sum(norms)/len(self.gradients) self._add_gradient_scalar('avg_norm', scalar_value=avg_norm)
Writes the median norm of the gradients to Tensorboard. def _write_median_norm(self, norms:[])->None: "Writes the median norm of the gradients to Tensorboard." median_norm = statistics.median(norms) self._add_gradient_scalar('median_norm', scalar_value=median_norm)
Writes the maximum norm of the gradients to Tensorboard. def _write_max_norm(self, norms:[])->None: "Writes the maximum norm of the gradients to Tensorboard." max_norm = max(norms) self._add_gradient_scalar('max_norm', scalar_value=max_norm)
Writes the minimum norm of the gradients to Tensorboard. def _write_min_norm(self, norms:[])->None: "Writes the minimum norm of the gradients to Tensorboard." min_norm = min(norms) self._add_gradient_scalar('min_norm', scalar_value=min_norm)
Writes the number of zeroes in the gradients to Tensorboard. def _write_num_zeros(self)->None: "Writes the number of zeroes in the gradients to Tensorboard." gradient_nps = [to_np(x.data) for x in self.gradients] num_zeros = sum((np.asarray(x) == 0.0).sum() for x in gradient_nps) self._add_gradient_scalar('num_zeros', scalar_value=num_zeros)
Writes the average of the gradients to Tensorboard. def _write_avg_gradient(self)->None: "Writes the average of the gradients to Tensorboard." avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients) self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient)
Writes the median of the gradients to Tensorboard. def _write_median_gradient(self)->None: "Writes the median of the gradients to Tensorboard." median_gradient = statistics.median(x.data.median() for x in self.gradients) self._add_gradient_scalar('median_gradient', scalar_value=median_gradient)
Writes the maximum of the gradients to Tensorboard. def _write_max_gradient(self)->None: "Writes the maximum of the gradients to Tensorboard." max_gradient = max(x.data.max() for x in self.gradients) self._add_gradient_scalar('max_gradient', scalar_value=max_gradient)
Writes the minimum of the gradients to Tensorboard. def _write_min_gradient(self)->None: "Writes the minimum of the gradients to Tensorboard." min_gradient = min(x.data.min() for x in self.gradients) self._add_gradient_scalar('min_gradient', scalar_value=min_gradient)
Writes model gradient statistics to Tensorboard. def write(self)->None: "Writes model gradient statistics to Tensorboard." if len(self.gradients) == 0: return norms = [x.data.norm() for x in self.gradients] self._write_avg_norm(norms=norms) self._write_median_norm(norms=norms) self._write_max_norm(norms=norms) self._write_min_norm(norms=norms) self._write_num_zeros() self._write_avg_gradient() self._write_median_gradient() self._write_max_gradient() self._write_min_gradient()
Writes list of images as tensors to Tensorboard. def _write_images(self, name:str, images:[Tensor])->None: "Writes list of images as tensors to Tensorboard." tag = self.ds_type.name + ' ' + name self.tbwriter.add_image(tag=tag, img_tensor=vutils.make_grid(images, normalize=True), global_step=self.iteration)
Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images. def _get_image_tensors(self)->([Tensor], [Tensor], [Tensor]): "Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images." orig_images, gen_images, real_images = [], [], [] for image_set in self.image_sets: orig_images.append(image_set.orig.px) gen_images.append(image_set.gen.px) real_images.append(image_set.real.px) return orig_images, gen_images, real_images
Writes original, generated and real(target) images to Tensorboard. def write(self)->None: "Writes original, generated and real(target) images to Tensorboard." orig_images, gen_images, real_images = self._get_image_tensors() self._write_images(name='orig images', images=orig_images) self._write_images(name='gen images', images=gen_images) self._write_images(name='real images', images=real_images)
Writes training and validation batch images to Tensorboard. def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None: "Writes training and validation batch images to Tensorboard." self._write_for_dstype(learn=learn, batch=val_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Valid) self._write_for_dstype(learn=learn, batch=trn_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Train)
Writes batch images of specified DatasetType to Tensorboard. def _write_for_dstype(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType)->None: "Writes batch images of specified DatasetType to Tensorboard." request = ImageTBRequest(learn=learn, batch=batch, iteration=iteration, tbwriter=tbwriter, ds_type=ds_type) asyncTBWriter.request_write(request)
Writes single model graph to Tensorboard. def write(self)->None: "Writes single model graph to Tensorboard." self.tbwriter.add_graph(model=self.model, input_to_model=self.input_to_model)
Writes model graph to Tensorboard. def write(self, model:nn.Module, tbwriter:SummaryWriter, input_to_model:torch.Tensor)->None: "Writes model graph to Tensorboard." request = GraphTBRequest(model=model, tbwriter=tbwriter, input_to_model=input_to_model) asyncTBWriter.request_write(request)
During training, batch norm layers keep track of a running mean and variance of the previous layer's activations. Because the parameters of the SWA model are computed as the average of other models' parameters, the SWA model never sees the training data itself, and therefore has no opportunity to compute the correct batch norm statistics. Before performing inference with the SWA model, we perform a single pass over the training data to calculate an accurate running mean and variance for each batch norm layer. def fix_batchnorm(swa_model, train_dl): """ During training, batch norm layers keep track of a running mean and variance of the previous layer's activations. Because the parameters of the SWA model are computed as the average of other models' parameters, the SWA model never sees the training data itself, and therefore has no opportunity to compute the correct batch norm statistics. Before performing inference with the SWA model, we perform a single pass over the training data to calculate an accurate running mean and variance for each batch norm layer. """ bn_modules = [] swa_model.apply(lambda module: collect_bn_modules(module, bn_modules)) if not bn_modules: return swa_model.train() for module in bn_modules: module.running_mean = torch.zeros_like(module.running_mean) module.running_var = torch.ones_like(module.running_var) momenta = [m.momentum for m in bn_modules] inputs_seen = 0 for (*x,y) in iter(train_dl): xs = V(x) batch_size = xs[0].size(0) momentum = batch_size / (inputs_seen + batch_size) for module in bn_modules: module.momentum = momentum res = swa_model(*xs) inputs_seen += batch_size for module, momentum in zip(bn_modules, momenta): module.momentum = momentum
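A minimal usage sketch, assuming `swa_model` holds the parameter-averaged weights and `train_dl` is the training DataLoader from the SWA run.
fix_batchnorm(swa_model, train_dl)   # one pass over the training data to refresh BN running stats
swa_model.eval()                     # the averaged model is now safe to evaluate / use for inference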
Wraps h in new Variables, to detach them from their history. def repackage_var(h): """Wraps h in new Variables, to detach them from their history.""" if IS_TORCH_04: return h.detach() if type(h) == torch.Tensor else tuple(repackage_var(v) for v in h) else: return Variable(h.data) if type(h) == Variable else tuple(repackage_var(v) for v in h)
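A sketch of where `repackage_var` fits in a truncated-BPTT training loop (the model, criterion, optimizer and batch iterable are assumed to exist); detaching the hidden state at each batch boundary keeps the autograd graph from growing across the whole epoch.
hidden = model.init_hidden(batch_size)        # hypothetical helper returning the initial hidden state
for x, y in train_batches:                    # hypothetical iterable of (input, target) tensors
    hidden = repackage_var(hidden)            # detach: gradients stop at the batch boundary
    output, hidden = model(x, hidden)
    loss = criterion(output, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()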
Returns a SequentialRNN model. An RNN_Encoder layer is instantiated using the parameters provided. This is followed by the creation of a LinearDecoder layer. Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder is used to instantiate the weights for the LinearDecoder layer. The SequentialRNN layer is torch's native Sequential wrapper that puts the RNN_Encoder and LinearDecoder layers sequentially in the model. Args: n_tok (int): number of unique vocabulary words (or tokens) in the source dataset emb_sz (int): the embedding size to use to encode each token n_hid (int): number of hidden activations per LSTM layer n_layers (int): number of LSTM layers to use in the architecture pad_token (int): the int value used for padding text. dropout (float): dropout to apply to the LinearDecoder (output) layer. dropouth (float): dropout to apply to the activations going from one LSTM layer to another dropouti (float): dropout to apply to the input layer. dropoute (float): dropout to apply to the embedding layer. wdrop (float): dropout used for an LSTM's internal (or hidden) recurrent weights. tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the weights of the LinearDecoder layer. qrnn (bool): decide if the model is composed of LSTMs (False) or QRNNs (True). bias (bool): decide if the decoder should have a bias layer or not. Returns: A SequentialRNN model def get_language_model(n_tok, emb_sz, n_hid, n_layers, pad_token, dropout=0.4, dropouth=0.3, dropouti=0.5, dropoute=0.1, wdrop=0.5, tie_weights=True, qrnn=False, bias=False): """Returns a SequentialRNN model. An RNN_Encoder layer is instantiated using the parameters provided. This is followed by the creation of a LinearDecoder layer. Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder is used to instantiate the weights for the LinearDecoder layer. The SequentialRNN layer is torch's native Sequential wrapper that puts the RNN_Encoder and LinearDecoder layers sequentially in the model. Args: n_tok (int): number of unique vocabulary words (or tokens) in the source dataset emb_sz (int): the embedding size to use to encode each token n_hid (int): number of hidden activations per LSTM layer n_layers (int): number of LSTM layers to use in the architecture pad_token (int): the int value used for padding text. dropout (float): dropout to apply to the LinearDecoder (output) layer. dropouth (float): dropout to apply to the activations going from one LSTM layer to another dropouti (float): dropout to apply to the input layer. dropoute (float): dropout to apply to the embedding layer. wdrop (float): dropout used for an LSTM's internal (or hidden) recurrent weights. tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the weights of the LinearDecoder layer. qrnn (bool): decide if the model is composed of LSTMs (False) or QRNNs (True). bias (bool): decide if the decoder should have a bias layer or not. Returns: A SequentialRNN model """ rnn_enc = RNN_Encoder(n_tok, emb_sz, n_hid=n_hid, n_layers=n_layers, pad_token=pad_token, dropouth=dropouth, dropouti=dropouti, dropoute=dropoute, wdrop=wdrop, qrnn=qrnn) enc = rnn_enc.encoder if tie_weights else None return SequentialRNN(rnn_enc, LinearDecoder(n_tok, emb_sz, dropout, tie_encoder=enc, bias=bias))
Invoked during the forward propagation of the RNN_Encoder module. Args: input (Tensor): input of shape (sentence length x batch_size) Returns: raw_outputs, outputs (Tuple[List[Tensor], List[Tensor]]): list of tensors evaluated from each RNN layer without using dropouth, and list of tensors evaluated from each RNN layer using dropouth. def forward(self, input): """ Invoked during the forward propagation of the RNN_Encoder module. Args: input (Tensor): input of shape (sentence length x batch_size) Returns: raw_outputs, outputs (Tuple[List[Tensor], List[Tensor]]): list of tensors evaluated from each RNN layer without using dropouth, and list of tensors evaluated from each RNN layer using dropouth. """ sl,bs = input.size() if bs!=self.bs: self.bs=bs self.reset() with set_grad_enabled(self.training): emb = self.encoder_with_dropout(input, dropout=self.dropoute if self.training else 0) emb = self.dropouti(emb) raw_output = emb new_hidden,raw_outputs,outputs = [],[],[] for l, (rnn,drop) in enumerate(zip(self.rnns, self.dropouths)): current_input = raw_output with warnings.catch_warnings(): warnings.simplefilter("ignore") raw_output, new_h = rnn(raw_output, self.hidden[l]) new_hidden.append(new_h) raw_outputs.append(raw_output) if l != self.n_layers - 1: raw_output = drop(raw_output) outputs.append(raw_output) self.hidden = repackage_var(new_hidden) return raw_outputs, outputs
Replace repetitions at the character level in `t`. def replace_rep(t:str) -> str: "Replace repetitions at the character level in `t`." def _replace_rep(m:Collection[str]) -> str: c,cc = m.groups() return f' {TK_REP} {len(cc)+1} {c} ' re_rep = re.compile(r'(\S)(\1{3,})') return re_rep.sub(_replace_rep, t)
Replace word repetitions in `t`. def replace_wrep(t:str) -> str: "Replace word repetitions in `t`." def _replace_wrep(m:Collection[str]) -> str: c,cc = m.groups() return f' {TK_WREP} {len(cc.split())+1} {c} ' re_wrep = re.compile(r'(\b\w+\W+)(\1{3,})') return re_wrep.sub(_replace_wrep, t)
List of replacements from html strings in `x`. def fix_html(x:str) -> str: "List of replacements from html strings in `x`." re1 = re.compile(r' +') x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace( 'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace( '<br />', "\n").replace('\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace( ' @-@ ','-').replace(' @,@ ',',').replace('\\', ' \\ ') return re1.sub(' ', html.unescape(x))
Replace tokens in ALL CAPS in `x` by their lower version and add `TK_UP` before. def replace_all_caps(x:Collection[str]) -> Collection[str]: "Replace tokens in ALL CAPS in `x` by their lower version and add `TK_UP` before." res = [] for t in x: if t.isupper() and len(t) > 1: res.append(TK_UP); res.append(t.lower()) else: res.append(t) return res
Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before. def deal_caps(x:Collection[str]) -> Collection[str]: "Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before." res = [] for t in x: if t == '': continue if t[0].isupper() and len(t) > 1 and t[1:].islower(): res.append(TK_MAJ) res.append(t.lower()) return res
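A few concrete examples of what the rules above produce (outputs shown approximately; `TK_REP`, `TK_WREP`, `TK_UP` and `TK_MAJ` are assumed to be fastai's default marker tokens xxrep, xxwrep, xxup, xxmaj).
replace_rep('I am verrrrry happy')                 # -> 'I am ve xxrep 5 r y happy'
replace_wrep('I am very very very very tired')     # -> 'I am  xxwrep 4 very  tired'
replace_all_caps(['I', 'AM', 'SHOUTING'])          # -> ['I', 'xxup', 'am', 'xxup', 'shouting']
deal_caps(['Hello', 'world'])                      # -> ['xxmaj', 'hello', 'world']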
Process one text `t` with tokenizer `tok`. def process_text(self, t:str, tok:BaseTokenizer) -> List[str]: "Process one text `t` with tokenizer `tok`." for rule in self.pre_rules: t = rule(t) toks = tok.tokenizer(t) for rule in self.post_rules: toks = rule(toks) return toks
Process a list of `texts` in one process. def _process_all_1(self, texts:Collection[str]) -> List[List[str]]: "Process a list of `texts` in one process." tok = self.tok_func(self.lang) if self.special_cases: tok.add_special_cases(self.special_cases) return [self.process_text(str(t), tok) for t in texts]
Process a list of `texts`. def process_all(self, texts:Collection[str]) -> List[List[str]]: "Process a list of `texts`." if self.n_cpus <= 1: return self._process_all_1(texts) with ProcessPoolExecutor(self.n_cpus) as e: return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
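A short usage sketch, assuming fastai v1's default spaCy-backed `Tokenizer` (and its English model) is available; `process_all` fans the texts out over `n_cpus` worker processes and applies the pre/post rules shown earlier.
from fastai.text import Tokenizer

texts = ['Hello WORLD!', 'fastai is fun fun fun fun fun']
tokens = Tokenizer(n_cpus=2).process_all(texts)
# tokens is a list of token lists, e.g. the ALL-CAPS word comes back as [..., 'xxup', 'world', ...]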
Convert a list of tokens `t` to their ids. def numericalize(self, t:Collection[str]) -> List[int]: "Convert a list of tokens `t` to their ids." return [self.stoi[w] for w in t]
Convert a list of `nums` to their tokens. def textify(self, nums:Collection[int], sep=' ') -> List[str]: "Convert a list of `nums` to their tokens." return sep.join([self.itos[i] for i in nums]) if sep is not None else [self.itos[i] for i in nums]
Create a vocabulary from a set of `tokens`. def create(cls, tokens:Tokens, max_vocab:int, min_freq:int) -> 'Vocab': "Create a vocabulary from a set of `tokens`." freq = Counter(p for o in tokens for p in o) itos = [o for o,c in freq.most_common(max_vocab) if c >= min_freq] for o in reversed(defaults.text_spec_tok): if o in itos: itos.remove(o) itos.insert(0, o) return cls(itos)
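A minimal round-trip sketch with `Vocab` (the toy token lists below are made up; exact ids depend on token frequencies and on fastai's special tokens being inserted at the front of `itos`).
tokens = [['xxbos', 'the', 'cat', 'sat'], ['xxbos', 'the', 'dog', 'sat']]
vocab  = Vocab.create(tokens, max_vocab=60000, min_freq=1)
ids    = vocab.numericalize(['the', 'cat', 'sat'])   # e.g. [9, 11, 10] -- exact values vary
vocab.textify(ids)                                   # -> 'the cat sat'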
Load the `Vocab` contained in `path` def load(cls, path): "Load the `Vocab` contained in `path`" itos = pickle.load(open(path, 'rb')) return cls(itos)
Plots the loss as a function of iterations. When used in a Jupyter notebook, the plot is displayed inline. Otherwise, both the plot and the losses are saved to save_path. def plot_loss(self, n_skip=10, n_skip_end=5): ''' Plots the loss as a function of iterations. When used in a Jupyter notebook, the plot is displayed inline. Otherwise, both the plot and the losses are saved to save_path. ''' if not in_ipynb(): plt.switch_backend('agg') plt.plot(self.iterations[n_skip:-n_skip_end], self.losses[n_skip:-n_skip_end]) if not in_ipynb(): plt.savefig(os.path.join(self.save_path, 'loss_plot.png')) np.save(os.path.join(self.save_path, 'losses.npy'), self.losses[10:])
Plots the learning rate in a Jupyter notebook or console, depending on the environment of the learner. def plot_lr(self): '''Plots the learning rate in a Jupyter notebook or console, depending on the environment of the learner.''' if not in_ipynb(): plt.switch_backend('agg') if self.record_mom: fig, axs = plt.subplots(1,2,figsize=(12,4)) for i in range(0,2): axs[i].set_xlabel('iterations') axs[0].set_ylabel('learning rate') axs[1].set_ylabel('momentum') axs[0].plot(self.iterations,self.lrs) axs[1].plot(self.iterations,self.momentums) else: plt.xlabel("iterations") plt.ylabel("learning rate") plt.plot(self.iterations, self.lrs) if not in_ipynb(): plt.savefig(os.path.join(self.save_path, 'lr_plot.png'))
Plots the loss function with respect to learning rate, in log scale. def plot(self, n_skip=10, n_skip_end=5): ''' Plots the loss function with respect to learning rate, in log scale. ''' plt.ylabel("validation loss") plt.xlabel("learning rate (log scale)") plt.plot(self.lrs[n_skip:-(n_skip_end+1)], self.losses[n_skip:-(n_skip_end+1)]) plt.xscale('log')
Plots the learning rate/momentum schedule def plot_lr(self, show_text=True, show_moms=True): """Plots the learning rate/momentum schedule""" phase_limits = [0] for nb_batch, phase in zip(self.nb_batches, self.phases): phase_limits.append(phase_limits[-1] + nb_batch * phase.epochs) if not in_ipynb(): plt.switch_backend('agg') np_plts = 2 if show_moms else 1 fig, axs = plt.subplots(1,np_plts,figsize=(6*np_plts,4)) if not show_moms: axs = [axs] for i in range(np_plts): axs[i].set_xlabel('iterations') axs[0].set_ylabel('learning rate') axs[0].plot(self.iterations,self.lrs) if show_moms: axs[1].set_ylabel('momentum') axs[1].plot(self.iterations,self.momentums) if show_text: for i, phase in enumerate(self.phases): text = phase.opt_fn.__name__ if phase.wds is not None: text+='\nwds='+str(phase.wds) if phase.beta is not None: text+='\nbeta='+str(phase.beta) for k in range(np_plts): if i < len(self.phases)-1: draw_line(axs[k], phase_limits[i+1]) draw_text(axs[k], (phase_limits[i]+phase_limits[i+1])/2, text) if not in_ipynb(): plt.savefig(os.path.join(self.save_path, 'lr_plot.png'))
Distributed training of Imagenette. def main( gpu:Param("GPU to run on", str)=None, woof: Param("Use imagewoof (otherwise imagenette)", int)=0, lr: Param("Learning rate", float)=1e-3, size: Param("Size (px: 128,192,224)", int)=128, alpha: Param("Alpha", float)=0.99, mom: Param("Momentum", float)=0.9, eps: Param("epsilon", float)=1e-6, epochs: Param("Number of epochs", int)=5, bs: Param("Batch size", int)=256, mixup: Param("Mixup", float)=0., opt: Param("Optimizer (adam,rms,sgd)", str)='adam', arch: Param("Architecture (xresnet34, xresnet50, presnet34, presnet50)", str)='xresnet50', dump: Param("Print model; don't train", int)=0, ): "Distributed training of Imagenette." gpu = setup_distrib(gpu) if gpu is None: bs *= torch.cuda.device_count() if opt=='adam' : opt_func = partial(optim.Adam, betas=(mom,alpha), eps=eps) elif opt=='rms' : opt_func = partial(optim.RMSprop, alpha=alpha, eps=eps) elif opt=='sgd' : opt_func = partial(optim.SGD, momentum=mom) data = get_data(size, woof, bs) bs_rat = bs/256 if gpu is not None: bs_rat *= num_distrib() if not gpu: print(f'lr: {lr}; eff_lr: {lr*bs_rat}; size: {size}; alpha: {alpha}; mom: {mom}; eps: {eps}') lr *= bs_rat m = globals()[arch] learn = (Learner(data, m(c_out=10), wd=1e-2, opt_func=opt_func, metrics=[accuracy,top_k_accuracy], bn_wd=False, true_wd=True, loss_func = LabelSmoothingCrossEntropy()) ) if dump: print(learn.model); exit() if mixup: learn = learn.mixup(alpha=mixup) learn = learn.to_fp16(dynamic=True) if gpu is None: learn.to_parallel() elif num_distrib()>1: learn.to_distributed(gpu) # Requires `-m fastai.launch` learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)
Test if `last_loss` is NaN and interrupts training. def on_batch_end(self, last_loss, epoch, num_batch, **kwargs:Any)->None: "Test if `last_loss` is NaN and interrupts training." if self.stop: return True #to skip validation after stopping during training if torch.isnan(last_loss): print (f'Epoch/Batch ({epoch}/{num_batch}): Invalid loss, terminating training.') return {'stop_epoch': True, 'stop_training': True, 'skip_validate': True}
Initializes the best value. def on_train_begin(self, **kwargs:Any)->None: "Initializes the best value." self.best = float('inf') if self.operator == np.less else -float('inf')
Pick the monitored value. def get_monitor_value(self): "Pick the monitored value." if self.monitor=='trn_loss' and len(self.learn.recorder.losses) == 0: return None elif len(self.learn.recorder.val_losses) == 0: return None values = {'train_loss':self.learn.recorder.losses[-1].cpu().numpy(), 'valid_loss':self.learn.recorder.val_losses[-1]} if values['valid_loss'] is None: return if self.learn.recorder.metrics: for m, n in zip(self.learn.recorder.metrics[-1],self.learn.recorder.names[3:-1]): values[n] = m if values.get(self.monitor) is None: warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {", ".join(map(str, self.learn.recorder.names[1:-1]))}') return values.get(self.monitor)
Compare the value monitored to its best score and maybe save the model. def on_epoch_end(self, epoch:int, **kwargs:Any)->None: "Compare the value monitored to its best score and maybe save the model." if self.every=="epoch": self.learn.save(f'{self.name}_{epoch}') else: #every="improvement" current = self.get_monitor_value() if current is not None and self.operator(current, self.best): print(f'Better model found at epoch {epoch} with {self.monitor} value: {current}.') self.best = current self.learn.save(f'{self.name}')
Load the best model. def on_train_end(self, **kwargs): "Load the best model." if self.every=="improvement" and (self.learn.path/f'{self.learn.model_dir}/{self.name}.pth').is_file(): self.learn.load(f'{self.name}', purge=False)
Initialize inner arguments. def on_train_begin(self, **kwargs:Any)->None: "Initialize inner arguments." self.wait, self.opt = 0, self.learn.opt super().on_train_begin(**kwargs)
Compare the value monitored to its best and maybe reduce lr. def on_epoch_end(self, epoch, **kwargs:Any)->None: "Compare the value monitored to its best and maybe reduce lr." current = self.get_monitor_value() if current is None: return if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0 else: self.wait += 1 if self.wait > self.patience: self.opt.lr *= self.factor self.wait = 0 print(f'Epoch {epoch}: reducing lr to {self.opt.lr}')
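A usage sketch for the learning-rate tracker above; in fastai v1 it is exposed as `ReduceLROnPlateauCallback` (class name and keyword arguments assumed from fastai.callbacks.tracker), attached through `callback_fns` so a fresh instance is created for each `fit` call. `data` and `model` are assumed to exist.
from functools import partial
from fastai.callbacks import ReduceLROnPlateauCallback

learn = Learner(data, model,
                callback_fns=[partial(ReduceLROnPlateauCallback,
                                      monitor='valid_loss', patience=2, factor=0.5)])
learn.fit(10)   # lr is multiplied by 0.5 once valid_loss stops improving for more than `patience` epochs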
Convert a notebook `fname` to html file in `dest_path`. def convert_nb(fname, dest_path='.'): "Convert a notebook `fname` to html file in `dest_path`." from .gen_notebooks import remove_undoc_cells, remove_code_cell_jupyter_widget_state_elem nb = read_nb(fname) nb['cells'] = remove_undoc_cells(nb['cells']) nb['cells'] = remove_code_cell_jupyter_widget_state_elem(nb['cells']) fname = Path(fname).absolute() dest_name = fname.with_suffix('.html').name meta = nb['metadata'] meta_jekyll = meta['jekyll'] if 'jekyll' in meta else {'title': fname.with_suffix('').name} meta_jekyll['nb_path'] = f'{fname.parent.name}/{fname.name}' with open(f'{dest_path}/{dest_name}','w') as f: f.write(exporter.from_notebook_node(nb, resources=meta_jekyll)[0])
Convert modified notebooks in `folder` to html pages in `dest_path`. def convert_all(folder, dest_path='.', force_all=False): "Convert modified notebooks in `folder` to html pages in `dest_path`." path = Path(folder) changed_cnt = 0 for fname in path.glob("*.ipynb"): # only rebuild modified files fname_out = Path(dest_path)/fname.with_suffix('.html').name if not force_all and fname_out.exists(): in_mod = os.path.getmtime(fname) out_mod = os.path.getmtime(fname_out) if in_mod < out_mod: continue print(f"converting: {fname} => {fname_out}") changed_cnt += 1 convert_nb(fname, dest_path=dest_path) if not changed_cnt: print("No notebooks were modified")
Function that collects samples and adds padding. Flips token order if needed. def pad_collate(samples:BatchSamples, pad_idx:int=1, pad_first:bool=True, backwards:bool=False) -> Tuple[LongTensor, LongTensor]: "Function that collects samples and adds padding. Flips token order if needed." samples = to_data(samples) max_len = max([len(s[0]) for s in samples]) res = torch.zeros(len(samples), max_len).long() + pad_idx if backwards: pad_first = not pad_first for i,s in enumerate(samples): if pad_first: res[i,-len(s[0]):] = LongTensor(s[0]) else: res[i,:len(s[0])] = LongTensor(s[0]) if backwards: res = res.flip(1) return res, tensor(np.array([s[1] for s in samples]))
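A toy illustration of the padding behaviour: with `pad_idx=1` and `pad_first=True`, the shorter sequence is left-padded to the batch's maximum length.
samples = [([5, 6, 7, 8], 0), ([9, 10], 1)]
x, y = pad_collate(samples, pad_idx=1, pad_first=True)
# x -> tensor([[ 5,  6,  7,  8],
#              [ 1,  1,  9, 10]])
# y -> tensor([0, 1])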
Read the text in `fn`. def open_text(fn:PathOrStr, enc='utf-8'): "Read the text in `fn`." with open(fn,'r', encoding = enc) as f: return ''.join(f.readlines())
Create the ragged array that will be filled when we ask for items. def allocate_buffers(self): "Create the ragged array that will be filled when we ask for items." if self.ite_len is None: len(self) self.idx = LanguageModelPreLoader.CircularIndex(len(self.dataset.x.items), not self.backwards) self.batch = np.zeros((self.bs, self.bptt+1), dtype=np.int64) self.batch_x, self.batch_y = self.batch[:,0:self.bptt], self.batch[:,1:self.bptt+1] #ro: index of the text we're at inside our datasets for the various batches self.ro = np.zeros(self.bs, dtype=np.int64) #ri: index of the token we're at inside our current text for the various batches self.ri = np.zeros(self.bs, dtype=np.int64)
Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented def fill_row(self, forward, items, idx, row, ro, ri, overlap,lengths): "Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented" ibuf = n = 0 ro -= 1 while ibuf < row.size: ro += 1 ix = idx[ro] rag = items[ix] if forward: ri = 0 if ibuf else ri n = min(lengths[ix] - ri, row.size - ibuf) row[ibuf:ibuf+n] = rag[ri:ri+n] else: ri = lengths[ix] if ibuf else ri n = min(ri, row.size - ibuf) row[ibuf:ibuf+n] = rag[ri-n:ri][::-1] ibuf += n return ro, ri + ((n-overlap) if forward else -(n-overlap))
Create a `TextDataBunch` from ids, labels and a `vocab`. `kwargs` are passed to the dataloader creation. def from_ids(cls, path:PathOrStr, vocab:Vocab, train_ids:Collection[Collection[int]], valid_ids:Collection[Collection[int]], test_ids:Collection[Collection[int]]=None, train_lbls:Collection[Union[int,float]]=None, valid_lbls:Collection[Union[int,float]]=None, classes:Collection[Any]=None, processor:PreProcessor=None, **kwargs) -> DataBunch: "Create a `TextDataBunch` from ids, labels and a `vocab`. `kwargs` are passed to the dataloader creation." src = ItemLists(path, TextList(train_ids, vocab, path=path, processor=[]), TextList(valid_ids, vocab, path=path, processor=[])) src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_lists(train_lbls, valid_lbls, classes=classes, processor=[]) if not is1d(train_lbls): src.train.y.one_hot,src.valid.y.one_hot = True,True if test_ids is not None: src.add_test(TextList(test_ids, vocab, path=path), label=train_lbls[0]) src.valid.x.processor = ifnone(processor, [TokenizeProcessor(), NumericalizeProcessor(vocab=vocab)]) return src.databunch(**kwargs)
Load a `TextDataBunch` from `path/cache_name`. `kwargs` are passed to the dataloader creation. def load(cls, path:PathOrStr, cache_name:PathOrStr='tmp', processor:PreProcessor=None, **kwargs): "Load a `TextDataBunch` from `path/cache_name`. `kwargs` are passed to the dataloader creation." warn("""This method is deprecated and only kept to load data serialized in v1.0.43 or earlier. Use `load_data` for data saved with v1.0.44 or later.""", DeprecationWarning) cache_path = Path(path)/cache_name vocab = Vocab(pickle.load(open(cache_path/'itos.pkl','rb'))) train_ids,train_lbls = np.load(cache_path/f'train_ids.npy'), np.load(cache_path/f'train_lbl.npy') valid_ids,valid_lbls = np.load(cache_path/f'valid_ids.npy'), np.load(cache_path/f'valid_lbl.npy') test_ids = np.load(cache_path/f'test_ids.npy') if os.path.isfile(cache_path/f'test_ids.npy') else None classes = loadtxt_str(cache_path/'classes.txt') if os.path.isfile(cache_path/'classes.txt') else None return cls.from_ids(path, vocab, train_ids, valid_ids, test_ids, train_lbls, valid_lbls, classes, processor, **kwargs)
Create a `TextDataBunch` from tokens and labels. `kwargs` are passed to the dataloader creation. def from_tokens(cls, path:PathOrStr, trn_tok:Collection[Collection[str]], trn_lbls:Collection[Union[int,float]], val_tok:Collection[Collection[str]], val_lbls:Collection[Union[int,float]], vocab:Vocab=None, tst_tok:Collection[Collection[str]]=None, classes:Collection[Any]=None, max_vocab:int=60000, min_freq:int=3, **kwargs) -> DataBunch: "Create a `TextDataBunch` from tokens and labels. `kwargs` are passed to the dataloader creation." processor = NumericalizeProcessor(vocab=vocab, max_vocab=max_vocab, min_freq=min_freq) src = ItemLists(path, TextList(trn_tok, path=path, processor=processor), TextList(val_tok, path=path, processor=processor)) src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_lists(trn_lbls, val_lbls, classes=classes) if tst_tok is not None: src.add_test(TextList(tst_tok, path=path)) return src.databunch(**kwargs)
Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation. def from_df(cls, path:PathOrStr, train_df:DataFrame, valid_df:DataFrame, test_df:Optional[DataFrame]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0, label_delim:str=None, chunksize:int=10000, max_vocab:int=60000, min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch: "Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation." processor = _get_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab, min_freq=min_freq, mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos) if classes is None and is_listy(label_cols) and len(label_cols) > 1: classes = label_cols src = ItemLists(path, TextList.from_df(train_df, path, cols=text_cols, processor=processor), TextList.from_df(valid_df, path, cols=text_cols, processor=processor)) if cls==TextLMDataBunch: src = src.label_for_lm() else: if label_delim is not None: src = src.label_from_df(cols=label_cols, classes=classes, label_delim=label_delim) else: src = src.label_from_df(cols=label_cols, classes=classes) if test_df is not None: src.add_test(TextList.from_df(test_df, path, cols=text_cols)) return src.databunch(**kwargs)
Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation. def from_csv(cls, path:PathOrStr, csv_name, valid_pct:float=0.2, test:Optional[str]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, delimiter:str=None, header='infer', text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0, label_delim:str=None, chunksize:int=10000, max_vocab:int=60000, min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch: "Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation." df = pd.read_csv(Path(path)/csv_name, header=header, delimiter=delimiter) df = df.iloc[np.random.permutation(len(df))] cut = int(valid_pct * len(df)) + 1 train_df, valid_df = df[cut:], df[:cut] test_df = None if test is None else pd.read_csv(Path(path)/test, header=header, delimiter=delimiter) return cls.from_df(path, train_df, valid_df, test_df, tokenizer=tokenizer, vocab=vocab, classes=classes, text_cols=text_cols, label_cols=label_cols, label_delim=label_delim, chunksize=chunksize, max_vocab=max_vocab, min_freq=min_freq, mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos, **kwargs)
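A usage sketch for the csv factory method (the 'data' folder, 'texts.csv' file and column indices are hypothetical).
data_lm   = TextLMDataBunch.from_csv('data', 'texts.csv', text_cols=1)              # language-model data
data_clas = TextClasDataBunch.from_csv('data', 'texts.csv', text_cols=1, label_cols=0,
                                       valid_pct=0.2, bs=32)                        # classifier data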
Create a `TextDataBunch` from text files in folders. def from_folder(cls, path:PathOrStr, train:str='train', valid:str='valid', test:Optional[str]=None, classes:Collection[Any]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000, max_vocab:int=60000, min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs): "Create a `TextDataBunch` from text files in folders." path = Path(path).absolute() processor = [OpenFileProcessor()] + _get_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab, min_freq=min_freq, mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos) src = (TextList.from_folder(path, processor=processor) .split_by_folder(train=train, valid=valid)) src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_folder(classes=classes) if test is not None: src.add_test_folder(path/test) return src.databunch(**kwargs)
Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()` def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs=64, val_bs:int=None, num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate, dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> DataBunch: "Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`" datasets = cls._init_ds(train_ds, valid_ds, test_ds) val_bs = ifnone(val_bs, bs) datasets = [LanguageModelPreLoader(ds, shuffle=(i==0), bs=(bs if i==0 else val_bs), bptt=bptt, backwards=backwards) for i,ds in enumerate(datasets)] val_bs = bs dls = [DataLoader(d, b, shuffle=False, **dl_kwargs) for d,b in zip(datasets, (bs,val_bs,val_bs,val_bs)) if d is not None] return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)
Function that transforms the `datasets` into a `DataBunch` for classification. Passes `**dl_kwargs` on to `DataLoader()` def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=32, val_bs:int=None, pad_idx=1, pad_first=True, device:torch.device=None, no_check:bool=False, backwards:bool=False, **dl_kwargs) -> DataBunch: "Function that transforms the `datasets` into a `DataBunch` for classification. Passes `**dl_kwargs` on to `DataLoader()`" datasets = cls._init_ds(train_ds, valid_ds, test_ds) val_bs = ifnone(val_bs, bs) collate_fn = partial(pad_collate, pad_idx=pad_idx, pad_first=pad_first, backwards=backwards) train_sampler = SortishSampler(datasets[0].x, key=lambda t: len(datasets[0][t][0].data), bs=bs) train_dl = DataLoader(datasets[0], batch_size=bs, sampler=train_sampler, drop_last=True, **dl_kwargs) dataloaders = [train_dl] for ds in datasets[1:]: lengths = [len(t) for t in ds.x.items] sampler = SortSampler(ds.x, key=lengths.__getitem__) dataloaders.append(DataLoader(ds, batch_size=val_bs, sampler=sampler, **dl_kwargs)) return cls(*dataloaders, path=path, device=device, collate_fn=collate_fn, no_check=no_check)
A special labelling method for language models. def label_for_lm(self, **kwargs): "A special labelling method for language models." self.__class__ = LMTextList kwargs['label_cls'] = LMLabelList return self.label_const(0, **kwargs)
Get the list of files in `path` that have a text suffix. `recurse` determines if we search subfolders. def from_folder(cls, path:PathOrStr='.', extensions:Collection[str]=text_extensions, vocab:Vocab=None, processor:PreProcessor=None, **kwargs)->'TextList': "Get the list of files in `path` that have a text suffix. `recurse` determines if we search subfolders." processor = ifnone(processor, [OpenFileProcessor(), TokenizeProcessor(), NumericalizeProcessor(vocab=vocab)]) return super().from_folder(path=path, extensions=extensions, processor=processor, **kwargs)
Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed. def show_xys(self, xs, ys, max_len:int=70)->None: "Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed." from IPython.display import display, HTML names = ['idx','text'] if self._is_lm else ['text','target'] items = [] for i, (x,y) in enumerate(zip(xs,ys)): txt_x = ' '.join(x.text.split(' ')[:max_len]) if max_len is not None else x.text items.append([i, txt_x] if self._is_lm else [txt_x, y]) items = np.array(items) df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names) with pd.option_context('display.max_colwidth', -1): display(HTML(df.to_html(index=False)))
r"""InceptionV4 model architecture from the `"Inception-v4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet def inceptionv4(pretrained=True): r"""InceptionV4 model architecture from the `"Inception-v4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = InceptionV4() if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['imagenet'])) return model
This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions from the model def predict_array(self, arr): """ This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions from the model """ precompute = self.precompute self.precompute = False pred = super().predict_array(arr) self.precompute = precompute return pred
Distributed training of CIFAR-10. Fastest speed is achieved if you run as follows: python -m fastai.launch train_cifar.py def main( gpu:Param("GPU to run on", str)=None ): """Distributed training of CIFAR-10. Fastest speed is achieved if you run as follows: python -m fastai.launch train_cifar.py""" gpu = setup_distrib(gpu) n_gpus = num_distrib() path = url2path(URLs.CIFAR) ds_tfms = ([*rand_pad(4, 32), flip_lr(p=0.5)], []) workers = min(16, num_cpus()//n_gpus) data = ImageDataBunch.from_folder(path, valid='test', ds_tfms=ds_tfms, bs=512//n_gpus, num_workers=workers).normalize(cifar_stats) learn = Learner(data, wrn_22(), metrics=accuracy) if gpu is None: learn.model = nn.DataParallel(learn.model) else: learn.to_distributed(gpu) learn.to_fp16() learn.fit_one_cycle(35, 3e-3, wd=0.4)
Set default values for options. def initialize_options(self): """Set default values for options.""" self.dep_groups = '' self.dep_quote = False self.dep_conda = False
Run command. def run(self): """Run command.""" wanted_groups = self.parse() deps = [] invalid_groups = [] for grp in wanted_groups: if grp in dep_groups: deps.extend(dep_groups[grp]) else: invalid_groups.append(grp) if invalid_groups or not wanted_groups: print("Available dependency groups:", ", ".join(sorted(dep_groups.keys()))) if invalid_groups: print(f"Error: Invalid group name(s): {', '.join(invalid_groups)}") exit(1) else: # prepare for shell word splitting (no whitespace in items) deps = [re.sub(" ", "", x, 0) for x in sorted(set(deps))] if self.dep_conda: for i in range(len(deps)): # strip pip-specific syntax deps[i] = re.sub(r';.*', '', deps[i]) # rename mismatching package names deps[i] = re.sub(r'^torch>', 'pytorch>', deps[i]) if self.dep_quote: # for manual copy-n-paste (assuming no " in vars) print(" ".join(map(lambda x: f'"{x}"', deps))) else: # if fed directly to `pip install` via backticks/$() don't quote print(" ".join(deps))
Return the saved feature indexes that will be concatenated Inputs: sfs (list): saved features by hook function, in other words intermediate activations last (bool): whether to concatenate only last different activation, or all from the encoder model def get_sfs_idxs(sfs, last=True): """ Return the saved feature indexes that will be concatenated Inputs: sfs (list): saved features by hook function, in other words intermediate activations last (bool): whether to concatenate only last different activation, or all from the encoder model """ if last: feature_szs = [sfs_feats.features.size()[-1] for sfs_feats in sfs] sfs_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0]) if feature_szs[0] != feature_szs[1]: sfs_idxs = [0] + sfs_idxs else: sfs_idxs = list(range(len(sfs))) return sfs_idxs
Return a `Hook` that stores activations of `module` in `self.stored` def hook_output (module:nn.Module, detach:bool=True, grad:bool=False)->Hook: "Return a `Hook` that stores activations of `module` in `self.stored`" return Hook(module, _hook_inner, detach=detach, is_forward=not grad)
Return `Hooks` that store activations of all `modules` in `self.stored` def hook_outputs(modules:Collection[nn.Module], detach:bool=True, grad:bool=False)->Hooks: "Return `Hooks` that store activations of all `modules` in `self.stored`" return Hooks(modules, _hook_inner, detach=detach, is_forward=not grad)
Create a dummy batch to go through `m` with `size`. def dummy_batch(m: nn.Module, size:tuple=(64,64))->Tensor: "Create a dummy batch to go through `m` with `size`." ch_in = in_channels(m) return one_param(m).new(1, ch_in, *size).requires_grad_(False).uniform_(-1.,1.)
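A small sketch tying `dummy_batch` and `hook_outputs` together to inspect per-layer activation shapes of an arbitrary convolutional body (the toy `body` below is made up; the helpers above are assumed to be in scope, e.g. from fastai.callbacks.hooks).
import torch.nn as nn

body = nn.Sequential(nn.Conv2d(3, 8, 3, stride=2, padding=1), nn.ReLU(),
                     nn.Conv2d(8, 16, 3, stride=2, padding=1))
with hook_outputs(list(body.children())) as hooks:
    _ = body(dummy_batch(body, size=(64, 64)))
    print([h.stored.shape for h in hooks])   # e.g. [(1,8,32,32), (1,8,32,32), (1,16,16,16)]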