Generator that yields a series of consecutive audio frames comprising each utterance, separated by yielding a single None.
Determines voice activity by the ratio of voiced frames within padding_ms. Uses a ring buffer to include padding_ms of audio prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
|---utterance---| |---utterance---|
def vad_collector(self, padding_ms=300, ratio=0.75, frames=None):
"""Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.
Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
|---utterence---| |---utterence---|
"""
if frames is None: frames = self.frame_generator()
num_padding_frames = padding_ms // self.frame_duration_ms
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
for frame in frames:
is_speech = self.vad.is_speech(frame, self.sample_rate)
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
if num_voiced > ratio * ring_buffer.maxlen:
triggered = True
for f, s in ring_buffer:
yield f
ring_buffer.clear()
else:
yield frame
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
if num_unvoiced > ratio * ring_buffer.maxlen:
triggered = False
yield None
ring_buffer.clear()
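A minimal consumption sketch (not from the source): it groups the yielded frames into per-utterance byte buffers, assuming `vad_audio` is an instance of the audio class that defines vad_collector above and that each frame is a bytes-like chunk of PCM audio.
def utterances_from(vad_audio):
    # Hypothetical helper: yield one bytes object per detected utterance.
    buffered = bytearray()
    for frame in vad_audio.vad_collector(padding_ms=300, ratio=0.75):
        if frame is not None:
            buffered.extend(frame)      # speech frame: accumulate raw audio bytes
        elif buffered:
            yield bytes(buffered)       # None marks the end of an utterance
            buffered = bytearray()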
|
Global `cut` function that supports parallel processing.
Note that this only works with the default POSTokenizer `dt`; custom POSTokenizer
instances are not supported.
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works with the default POSTokenizer `dt`; custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w
|
Change the module's `cut` and `cut_for_search` functions to the
parallel versions.
Note that this only works with the default Tokenizer `dt`; custom Tokenizer
instances are not supported.
def enable_parallel(processnum=None):
"""
Change the module's `cut` and `cut_for_search` functions to the
parallel versions.
Note that this only works with the default Tokenizer `dt`; custom Tokenizer
instances are not supported.
"""
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if os.name == 'nt':
raise NotImplementedError(
"jieba: parallel mode only supports posix system")
else:
from multiprocessing import Pool
dt.check_initialized()
if processnum is None:
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search
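A minimal usage sketch (not from the source), assuming the standard module-level jieba API on a POSIX system; the sample sentence is arbitrary.
import jieba
jieba.enable_parallel(4)                      # replaces jieba.cut / jieba.cut_for_search with the parallel versions
words = list(jieba.cut("我来到北京清华大学"))   # segmentation now runs in a 4-process pool
jieba.disable_parallel()                      # restore the single-process implementation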
|
The main function that segments an entire sentence containing
Chinese characters into separate words.
Parameter:
- sentence: The str(unicode) to be segmented.
- cut_all: Model type. True for full pattern, False for accurate pattern.
- HMM: Whether to use the Hidden Markov Model.
def cut(self, sentence, cut_all=False, HMM=True):
'''
The main function that segments an entire sentence containing
Chinese characters into separate words.
Parameter:
- sentence: The str(unicode) to be segmented.
- cut_all: Model type. True for full pattern, False for accurate pattern.
- HMM: Whether to use the Hidden Markov Model.
'''
sentence = strdecode(sentence)
if cut_all:
re_han = re_han_cut_all
re_skip = re_skip_cut_all
else:
re_han = re_han_default
re_skip = re_skip_default
if cut_all:
cut_block = self.__cut_all
elif HMM:
cut_block = self.__cut_DAG
else:
cut_block = self.__cut_DAG_NO_HMM
blocks = re_han.split(sentence)
for blk in blocks:
if not blk:
continue
if re_han.match(blk):
for word in cut_block(blk):
yield word
else:
tmp = re_skip.split(blk)
for x in tmp:
if re_skip.match(x):
yield x
elif not cut_all:
for xx in x:
yield xx
else:
yield x
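A minimal usage sketch (not from the source), calling the module-level jieba.cut wrapper around the default Tokenizer `dt`; the sample sentence is arbitrary.
import jieba
print("/".join(jieba.cut("我来到北京清华大学", cut_all=False, HMM=True)))  # accurate mode
print("/".join(jieba.cut("我来到北京清华大学", cut_all=True)))             # full mode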
|
Finer segmentation for search engines.
def cut_for_search(self, sentence, HMM=True):
"""
Finer segmentation for search engines.
"""
words = self.cut(sentence, HMM=HMM)
for w in words:
if len(w) > 2:
for i in xrange(len(w) - 1):
gram2 = w[i:i + 2]
if self.FREQ.get(gram2):
yield gram2
if len(w) > 3:
for i in xrange(len(w) - 2):
gram3 = w[i:i + 3]
if self.FREQ.get(gram3):
yield gram3
yield w
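A minimal usage sketch (not from the source), via the module-level jieba.cut_for_search wrapper; the overlapping 2-grams and 3-grams found in the dictionary are emitted before each full word.
import jieba
print(", ".join(jieba.cut_for_search("小明硕士毕业于中国科学院计算所，后在日本京都大学深造")))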
|
Load a personalized dictionary to improve the detection rate.
Parameter:
- f : A plain text file that contains words and their occurrences.
Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored
def load_userdict(self, f):
'''
Load a personalized dictionary to improve the detection rate.
Parameter:
- f : A plain text file that contains words and their occurrences.
Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored
'''
self.check_initialized()
if isinstance(f, string_types):
f_name = f
f = open(f, 'rb')
else:
f_name = resolve_filename(f)
for lineno, ln in enumerate(f, 1):
line = ln.strip()
if not isinstance(line, text_type):
try:
line = line.decode('utf-8').lstrip('\ufeff')
except UnicodeDecodeError:
raise ValueError('dictionary file %s must be utf-8' % f_name)
if not line:
continue
# match won't be None because there's at least one character
word, freq, tag = re_userdict.match(line).groups()
if freq is not None:
freq = freq.strip()
if tag is not None:
tag = tag.strip()
self.add_word(word, freq, tag)
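A minimal usage sketch (not from the source); userdict.txt is a hypothetical utf-8 file with one entry per line, following the `word [freq] [word_type]` layout described above.
# userdict.txt:
#   云计算 5 n
#   创新办 3 i
#   台中
import jieba
jieba.load_userdict("userdict.txt")   # a file-like object opened in binary mode also works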
|
Add a word to the dictionary.
freq and tag can be omitted; freq defaults to a calculated value
that ensures the word can be cut out.
def add_word(self, word, freq=None, tag=None):
"""
Add a word to the dictionary.
freq and tag can be omitted; freq defaults to a calculated value
that ensures the word can be cut out.
"""
self.check_initialized()
word = strdecode(word)
freq = int(freq) if freq is not None else self.suggest_freq(word, False)
self.FREQ[word] = freq
self.total += freq
if tag:
self.user_word_tag_tab[word] = tag
for ch in xrange(len(word)):
wfrag = word[:ch + 1]
if wfrag not in self.FREQ:
self.FREQ[wfrag] = 0
if freq == 0:
finalseg.add_force_split(word)
|
Suggest a word frequency to force the characters in a word to be
joined or split.
Parameter:
- segment : The segments that the word is expected to be cut into.
If the word should be treated as a whole, use a str.
- tune : If True, tune the word frequency.
Note that HMM may affect the final result. If the result doesn't change,
set HMM=False.
def suggest_freq(self, segment, tune=False):
"""
Suggest a word frequency to force the characters in a word to be
joined or split.
Parameter:
- segment : The segments that the word is expected to be cut into.
If the word should be treated as a whole, use a str.
- tune : If True, tune the word frequency.
Note that HMM may affect the final result. If the result doesn't change,
set HMM=False.
"""
self.check_initialized()
ftotal = float(self.total)
freq = 1
if isinstance(segment, string_types):
word = segment
for seg in self.cut(word, HMM=False):
freq *= self.FREQ.get(seg, 1) / ftotal
freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1))
else:
segment = tuple(map(strdecode, segment))
word = ''.join(segment)
for seg in segment:
freq *= self.FREQ.get(seg, 1) / ftotal
freq = min(int(freq * self.total), self.FREQ.get(word, 0))
if tune:
self.add_word(word, freq)
return freq
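A minimal usage sketch (not from the source), via the module-level jieba.suggest_freq wrapper; the sample words follow the jieba README-style examples, and HMM=False is passed to cut so the tuned frequencies take effect.
import jieba
jieba.suggest_freq(('中', '将'), tune=True)   # tune so that "中将" is split into 中 / 将
jieba.suggest_freq('台中', tune=True)          # tune so that "台中" is kept as one word
print("/".join(jieba.cut("如果放到post中将出错。", HMM=False)))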
|
Tokenize a sentence and yield tuples of (word, start, end).
Parameter:
- unicode_sentence: the str(unicode) to be segmented.
- mode: "default" or "search", "search" is for finer segmentation.
- HMM: whether to use the Hidden Markov Model.
def tokenize(self, unicode_sentence, mode="default", HMM=True):
"""
Tokenize a sentence and yield tuples of (word, start, end).
Parameter:
- unicode_sentence: the str(unicode) to be segmented.
- mode: "default" or "search", "search" is for finer segmentation.
- HMM: whether to use the Hidden Markov Model.
"""
if not isinstance(unicode_sentence, text_type):
raise ValueError("jieba: the input parameter should be unicode.")
start = 0
if mode == 'default':
for w in self.cut(unicode_sentence, HMM=HMM):
width = len(w)
yield (w, start, start + width)
start += width
else:
for w in self.cut(unicode_sentence, HMM=HMM):
width = len(w)
if len(w) > 2:
for i in xrange(len(w) - 1):
gram2 = w[i:i + 2]
if self.FREQ.get(gram2):
yield (gram2, start + i, start + i + 2)
if len(w) > 3:
for i in xrange(len(w) - 2):
gram3 = w[i:i + 3]
if self.FREQ.get(gram3):
yield (gram3, start + i, start + i + 3)
yield (w, start, start + width)
start += width
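A minimal usage sketch (not from the source), via the module-level jieba.tokenize wrapper; the sample sentence is arbitrary, and the offsets are character positions in the input.
import jieba
for word, start, end in jieba.tokenize(u"永和服装饰品有限公司"):
    print("%s\t start: %d \t end: %d" % (word, start, end))
for word, start, end in jieba.tokenize(u"永和服装饰品有限公司", mode="search"):
    print(word, start, end)               # search mode also yields in-dictionary 2-grams/3-grams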
|
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list, e.g. ['ns', 'n', 'vn', 'v'].
if the POS of a word is not in this list, it will be filtered out.
- withFlag: if True, return a list of pair(word, flag) objects, as from posseg.cut;
if False, return a list of plain words.
def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False):
"""
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list, e.g. ['ns', 'n', 'vn', 'v'].
if the POS of a word is not in this list, it will be filtered out.
- withFlag: if True, return a list of pair(word, flag) objects, as from posseg.cut;
if False, return a list of plain words.
"""
self.pos_filt = frozenset(allowPOS)
g = UndirectWeightedGraph()
cm = defaultdict(int)
words = tuple(self.tokenizer.cut(sentence))
for i, wp in enumerate(words):
if self.pairfilter(wp):
for j in xrange(i + 1, i + self.span):
if j >= len(words):
break
if not self.pairfilter(words[j]):
continue
if allowPOS and withFlag:
cm[(wp, words[j])] += 1
else:
cm[(wp.word, words[j].word)] += 1
for terms, w in cm.items():
g.addEdge(terms[0], terms[1], w)
nodes_rank = g.rank()
if withWeight:
tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags
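A minimal usage sketch (not from the source), via jieba.analyse.textrank; the sample text is arbitrary.
import jieba.analyse
text = "此外，公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元，增资后，吉林欧亚置业注册资本为7亿元。"
for word, weight in jieba.analyse.textrank(text, topK=10, withWeight=True,
                                           allowPOS=('ns', 'n', 'vn', 'v')):
    print(word, weight)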
|
Extract keywords from sentence using TF-IDF algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list, e.g. ['ns', 'n', 'vn', 'v', 'nr'].
if the POS of a word is not in this list, it will be filtered out.
- withFlag: only works when allowPOS is not empty.
if True, return a list of pair(word, flag) objects, as from posseg.cut;
if False, return a list of plain words.
def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):
"""
Extract keywords from sentence using TF-IDF algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list, e.g. ['ns', 'n', 'vn', 'v', 'nr'].
if the POS of a word is not in this list, it will be filtered out.
- withFlag: only works when allowPOS is not empty.
if True, return a list of pair(word, flag) objects, as from posseg.cut;
if False, return a list of plain words.
"""
if allowPOS:
allowPOS = frozenset(allowPOS)
words = self.postokenizer.cut(sentence)
else:
words = self.tokenizer.cut(sentence)
freq = {}
for w in words:
if allowPOS:
if w.flag not in allowPOS:
continue
elif not withFlag:
w = w.word
wc = w.word if allowPOS and withFlag else w
if len(wc.strip()) < 2 or wc.lower() in self.stop_words:
continue
freq[w] = freq.get(w, 0.0) + 1.0
total = sum(freq.values())
for k in freq:
kw = k.word if allowPOS and withFlag else k
freq[k] *= self.idf_freq.get(kw, self.median_idf) / total
if withWeight:
tags = sorted(freq.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(freq, key=freq.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags
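A minimal usage sketch (not from the source), via jieba.analyse.extract_tags; the sample text is arbitrary.
import jieba.analyse
text = "小明硕士毕业于中国科学院计算所，后在日本京都大学深造"
for word, tfidf in jieba.analyse.extract_tags(text, topK=5, withWeight=True):
    print(word, tfidf)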
|
Generates raw (English, other) pairs from a ParaCrawl V3.0 data file.
Args:
paracrawl_file: A ParaCrawl V3.0 en-.. data file.
Yields:
Pairs of (sentence_en, sentence_xx), as Unicode strings.
Raises:
StopIteration: If the file ends while this method is in the middle of
creating a translation pair.
def paracrawl_v3_pairs(paracrawl_file):
"""Generates raw (English, other) pairs from a ParaCrawl V3.0 data file.
Args:
paracrawl_file: A ParaCrawl V3.0 en-.. data file.
Yields:
Pairs of (sentence_en, sentence_xx), as Unicode strings.
Raises:
StopIteration: If the file ends while this method is in the middle of
creating a translation pair.
"""
raw_sentences = _raw_sentences(paracrawl_file)
for s_en in raw_sentences:
try:
s_xx = next(raw_sentences)
if s_en and s_xx: # Prevent empty string examples.
yield s_en, s_xx
except StopIteration:
tf.logging.error(
'Unmatched final sentence while reading in sentence pairs: [%s]',
s_en)
|
Generates Unicode strings, one for each <seg> in a ParaCrawl data file.
Also decodes some of the most common HTML entities found in ParaCrawl data.
Args:
paracrawl_file: A ParaCrawl V3.0 en-.. data file.
Yields:
One Unicode string for each <seg> element in the ParaCrawl data file.
def _raw_sentences(paracrawl_file):
"""Generates Unicode strings, one for each <seg> in a ParaCrawl data file.
Also decodes some of the most common HTML entities found in ParaCrawl data.
Args:
paracrawl_file: A ParaCrawl V3.0 en-.. data file.
Yields:
One Unicode string for each <seg> element in the ParaCrawl data file.
"""
for line_utf8 in paracrawl_file:
line_uni = line_utf8.decode('UTF-8')
text_match = re.match(r' +<seg>(.*)</seg>$', line_uni)
if text_match:
txt = text_match.group(1)
txt = re.sub(r'&amp;', r'&', txt)
txt = re.sub(r'& ?amp;', r'&', txt)
txt = re.sub(r'& ?apos;', r"'", txt)
txt = re.sub(r'& ?quot;', r'"', txt)
txt = re.sub(r'& ?lt;', r'<', txt)
txt = re.sub(r'& ?gt;', r'>', txt)
yield txt
|
Generates a cleaned-up stream of (English, other) translation pairs.
Cleaning includes both filtering and simplistic sentence splitting, with
minimal assumptions on the non-English pair member: (1) All filtering is
done based on the English member of the pair, and (2) sentence splitting
assumes only that sentences can end with one of '.!?' and begin with an
ASCII uppercase letter. Input pairs that would get split into different
numbers of sentences (e.g., three English sentences vs. two German ones) are
discarded.
Args:
en_xx_pairs: A stream (iterable) of Unicode string pairs. Each item in the
stream should be a (sentence_en, sentence_xx) pair.
Yields:
Cleaned-up (sentence_en, sentence_xx) pairs.
def clean_en_xx_pairs(en_xx_pairs):
"""Generates a cleaned-up stream of (English, other) translation pairs.
Cleaning includes both filtering and simplistic sentence splitting, with
minimal assumptions on the non-English pair member: (1) All filtering is
done based on the English member of the pair, and (2) sentence splitting
assumes only that sentences can end with one of '.!?' and begin with an
ASCII uppercase letter. Input pairs that would get split into different
numbers of sentences (e.g., three English sentences vs. two German ones) are
discarded.
Args:
en_xx_pairs: A stream (iterable) of Unicode string pairs. Each item in the
stream should be a (sentence_en, sentence_xx) pair.
Yields:
Cleaned-up (sentence_en, sentence_xx) pairs.
"""
for s1, s2 in en_xx_pairs:
if _regex_filter(s1):
continue
s1_list, s2_list = _split_sentences(s1, s2)
if len(s1_list) != len(s2_list):
continue # discard this pair
elif len(s1_list) == 1:
yield s1, s2
else:
for s1_subsentence, s2_subsentence in itertools.izip(s1_list, s2_list):
if _regex_filter(s1_subsentence):
continue
yield s1_subsentence, s2_subsentence
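A minimal end-to-end sketch (not from the source) chaining the generators above; the file name is hypothetical, and the file is assumed to be a gzipped ParaCrawl V3.0 data file whose lines contain the <seg> elements expected by _raw_sentences.
import gzip
with gzip.open("paracrawl-release3.en-de.filtered.tmx.gz", "rb") as f:   # hypothetical path
    for sentence_en, sentence_xx in clean_en_xx_pairs(paracrawl_v3_pairs(f)):
        pass  # write out or tokenize the aligned (English, other) pair here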
|
Obtain a list of image paths corresponding to training or eval case.
Args:
tmp_dir: str, the root path to which raw images were written, at the
top level having meta/ and raw/ subdirs.
case: bool, whether obtaining file paths for training (true) or eval
(false).
training_fraction: float, the fraction of the sub-image path list to
consider as the basis for training examples.
Returns:
list: A list of file paths.
Raises:
ValueError: if images not found in tmp_dir, or if training_fraction would
leave no examples for eval.
def _get_case_file_paths(tmp_dir, case, training_fraction=0.95):
"""Obtain a list of image paths corresponding to training or eval case.
Args:
tmp_dir: str, the root path to which raw images were written, at the
top level having meta/ and raw/ subdirs.
case: bool, whether obtaining file paths for training (true) or eval
(false).
training_fraction: float, the fraction of the sub-image path list to
consider as the basis for training examples.
Returns:
list: A list of file paths.
Raises:
ValueError: if images not found in tmp_dir, or if training_fraction would
leave no examples for eval.
"""
paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir)
if not paths:
raise ValueError("Search of tmp_dir (%s) " % tmp_dir,
"for subimage paths yielded an empty list, ",
"can't proceed with returning training/eval split.")
split_index = int(math.floor(len(paths)*training_fraction))
if split_index >= len(paths):
raise ValueError("For a path list of size %s "
"and a training_fraction of %s "
"the resulting split_index of the paths list, "
"%s, would leave no elements for the eval "
"condition." % (len(paths),
training_fraction,
split_index))
if case:
return paths[:split_index]
else:
return paths[split_index:]
|
Download a set of images from api.brain-map.org to `target_dir`.
Args:
image_ids: list, a list of image ids.
target_dir: str, a directory to which to download the images.
def maybe_download_image_dataset(image_ids, target_dir):
"""Download a set of images from api.brain-map.org to `target_dir`.
Args:
image_ids: list, a list of image ids.
target_dir: str, a directory to which to download the images.
"""
tf.gfile.MakeDirs(target_dir)
num_images = len(image_ids)
for i, image_id in enumerate(image_ids):
destination = os.path.join(target_dir, "%s.jpg" % i)
tmp_destination = "%s.temp" % destination
source_url = ("http://api.brain-map.org/api/v2/"
"section_image_download/%s" % image_id)
if tf.gfile.Exists(destination):
tf.logging.info("Image with ID already present, "
"skipping download (%s of %s)." % (
i+1, num_images
))
continue
tf.logging.info("Downloading image with id %s (%s of %s)" % (
image_id, i+1, num_images
))
response = requests.get(source_url, stream=True)
response.raise_for_status()
with tf.gfile.Open(tmp_destination, "w") as f:
for block in response.iter_content(1024):
f.write(block)
tf.gfile.Rename(tmp_destination, destination)
|
Create a numpy array with specified shape and masked fraction.
Args:
shape: tuple, shape of the mask to create.
fraction: float, fraction of the mask area to populate with `mask_scalar`.
Returns:
numpy.array: A numpy array storing the mask.
def random_square_mask(shape, fraction):
"""Create a numpy array with specified shape and masked fraction.
Args:
shape: tuple, shape of the mask to create.
fraction: float, fraction of the mask area to populate with `mask_scalar`.
Returns:
numpy.array: A numpy array storing the mask.
"""
mask = np.ones(shape)
patch_area = shape[0]*shape[1]*fraction
patch_dim = np.int(math.floor(math.sqrt(patch_area)))
if patch_area == 0 or patch_dim == 0:
return mask
x = np.random.randint(shape[0] - patch_dim)
y = np.random.randint(shape[1] - patch_dim)
mask[x:(x + patch_dim), y:(y + patch_dim), :] = 0
return mask
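A minimal usage sketch (not from the source): apply the mask to a synthetic RGB image so that roughly `fraction` of its area is zeroed out.
import numpy as np
image = np.random.uniform(0, 255, size=(128, 128, 3))
mask = random_square_mask(image.shape, fraction=0.25)
masked_image = image * mask    # pixels inside the random square become 0, the rest are unchanged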
|
Base problem example generator for Allen Brain Atlas problems.
Args:
tmp_dir: str, a directory where raw example input data has been stored.
training: bool, whether the mode of operation is training (or,
alternatively, evaluation), determining whether examples in tmp_dir
prefixed with train or dev will be used.
size: int, the image size to add to the example annotation.
training_fraction: float, the fraction of the sub-image path list to
consider as the basis for training examples.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: The string encoding the image as JPEG.
* image/format: The string "jpeg" indicating the image format.
* image/height: The integer indicating the image height.
* image/width: The integer indicating the image width.
def _generator(tmp_dir, training, size=_BASE_EXAMPLE_IMAGE_SIZE,
training_fraction=0.95):
"""Base problem example generator for Allen Brain Atlas problems.
Args:
tmp_dir: str, a directory where raw example input data has been stored.
training: bool, whether the mode of operation is training (or,
alternatively, evaluation), determining whether examples in tmp_dir
prefixed with train or dev will be used.
size: int, the image size to add to the example annotation.
training_fraction: float, the fraction of the sub-image path list to
consider as the basis for training examples.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: The string encoding the image as JPEG.
* image/format: The string "jpeg" indicating the image format.
* image/height: The integer indicating the image height.
* image/width: The integer indicating the image width.
"""
maybe_download_image_dataset(_IMAGE_IDS, tmp_dir)
image_files = _get_case_file_paths(tmp_dir=tmp_dir,
case=training,
training_fraction=training_fraction)
image_obj = PIL_Image()
tf.logging.info("Loaded case file paths (n=%s)" % len(image_files))
height = size
width = size
for input_path in image_files:
img = image_obj.open(input_path)
img = np.float32(img)
shape = np.shape(img)
for h_index in range(0, int(math.floor(shape[0]/size))):
h_offset = h_index * size
h_end = h_offset + size - 1
for v_index in range(0, int(math.floor(shape[1]/size))):
v_offset = v_index * size
v_end = v_offset + size - 1
# Extract a sub-image tile.
subimage = np.uint8(img[h_offset:h_end, v_offset:v_end]) # pylint: disable=invalid-sequence-index
# Filter images that are likely background (not tissue).
if np.amax(subimage) < 230:
continue
subimage = image_obj.fromarray(subimage)
buff = BytesIO()
subimage.save(buff, format="JPEG")
subimage_encoded = buff.getvalue()
yield {
"image/encoded": [subimage_encoded],
"image/format": ["jpeg"],
"image/height": [height],
"image/width": [width]
}
|
Set of hyperparameters.
def transformer_moe_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 2001
hparams.max_input_seq_length = 2000
hparams.max_target_seq_length = 2000
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 5
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = True
# According to noam, ("n", "da") seems better for harder-to-learn models
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# Hparams used by transformer_prepare_decoder() function
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams = common_attention.add_standard_attention_hparams(hparams)
# Decoder layers type. If set, num_decoder_layers parameter will be ignored
# and the number of decoder layer will be deduced from the string
# See top file comment for example of usage
hparams.add_hparam("layer_types", "")
# Default attention type (ex: a, loc, red,...) and feed-forward type (ex: fc,
# sep, moe,...)
hparams.add_hparam("default_att", "a")
hparams.add_hparam("default_ff", "fc")
return hparams
|
Hyperparameter specifics for long sequence generation.
def transformer_moe_8k():
"""Hyper parameters specifics for long sequence generation."""
hparams = transformer_moe_base()
hparams.batch_size = 8192
hparams.max_length = 0 # max_length == batch_size
hparams.eval_drop_long_sequences = True
hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches
hparams.default_ff = "sep"
hparams.hidden_size = 1024
return hparams
|
Base transformers model with moe.
Will have the following architecture:
* Encoder architecture:
* Layer 0: a - sep (self-attention - unmasked separable convolutions)
* Layer 1: a - sep
* Layer 2: a - sep
* Layer 3: a - sep
* Layer 4: a - sep
* Decoder architecture:
* Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep)
* Layer 1: a - a - sepm
* Layer 2: a - a - moe (mixture of expert layers in the middle)
* Layer 3: a - a - sepm
* Layer 4: a - a - sepm
Returns:
hparams
def transformer_moe_2k():
"""Base transformers model with moe.
Will have the following architecture:
* Encoder architecture:
* Layer 0: a - sep (self-attention - unmasked separable convolutions)
* Layer 1: a - sep
* Layer 2: a - sep
* Layer 3: a - sep
* Layer 4: a - sep
* Decoder architecture:
* Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep)
* Layer 1: a - a - sepm
* Layer 2: a - a - moe (mixture of expert layers in the middle)
* Layer 3: a - a - sepm
* Layer 4: a - a - sepm
Returns:
hparams
"""
hparams = transformer_moe_8k()
hparams.batch_size = 2048
hparams.default_ff = "sep"
# hparams.layer_types contains the network architecture:
encoder_archi = "a/a/a/a/a"
decoder_archi = "a-sepm/a-sepm/a-moe/a-sepm/a-sepm"
hparams.layer_types = "{}#{}".format(encoder_archi, decoder_archi)
return hparams
|
Model which formulates a seq2seq problem as language modeling.
def transformer_moe_prepend_8k():
"""Model which formulate a seq2seq problem as language modeling."""
hparams = transformer_moe_8k()
hparams.prepend_mode = "prepend_inputs_masked_attention"
hparams.eval_drop_long_sequences = False
hparams.max_input_seq_length = 7500
hparams.default_ff = "sepm"
hparams.layer_types = "locm/redm/locm-moe/redm/locm"
hparams.moe_num_experts = 256
return hparams
|
Applies residual function for RevNet.
Args:
x: input tensor
depth1: Number of output channels for the first and second conv layers.
depth2: Number of output channels for the third conv layer.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the first conv filter. Note that this particular
RevNet architecture only varies the stride for the first conv
filter. The stride for the second conv filter is always set to 1.
training: True for train phase, False for eval phase.
bottleneck: If true, apply bottleneck 1x1 down/up sampling.
padding: Padding for each conv layer.
Returns:
Output tensor after applying residual function for RevNet.
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1,
training=True, bottleneck=True, padding='SAME'):
"""Applies residual function for RevNet.
Args:
x: input tensor
depth1: Number of output channels for the first and second conv layers.
depth2: Number of output channels for the third conv layer.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the first conv filter. Note that this particular
RevNet architecture only varies the stride for the first conv
filter. The stride for the second conv filter is always set to 1.
training: True for train phase, False for eval phase.
bottleneck: If true, apply bottleneck 1x1 down/up sampling.
padding: Padding for each conv layer.
Returns:
Output tensor after applying residual function for RevNet.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope('f', reuse=tf.AUTO_REUSE):
if first_batch_norm:
net = tf.layers.batch_normalization(x, training=training)
net = tf.nn.relu(net)
else:
net = x
if bottleneck:
net = conv(net, depth1, 1, strides=stride,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth1, 3, strides=1,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth2, 1, strides=1,
padding=padding, activation=None)
else:
net = conv(net, depth2, 3, strides=stride,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth2, 3, strides=1,
padding=padding, activation=None)
return net
|
Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):
"""Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope(scope):
x = conv(x, output_channels, 1, strides=stride, padding='SAME',
activation=None)
return x
|
Downsamples 'x' by `stride` using average pooling.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'):
"""Downsamples 'x' by `stride` using average pooling.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
"""
with tf.variable_scope(scope):
if stride > 1:
avg_pool = CONFIG[dim]['avg_pool']
x = avg_pool(x,
pool_size=(stride, stride),
strides=(stride, stride),
padding='VALID')
input_channels = tf.shape(x)[3]
diff = output_channels - input_channels
x = tf.pad(
x, [[0, 0], [0, 0], [0, 0],
[diff // 2, diff // 2]])
return x
|
Standard ResNet initial block used as first RevNet block.
Args:
images: [N, H, W, 3] tensor of input images to the model.
num_channels: Output depth of convolutional layer in initial block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: stride for the convolution and pool layer.
kernel_size: Size of the initial convolution filter
maxpool: If true, apply a maxpool after the convolution
training: True for train phase, False for eval phase.
scope: Optional scope for the init block.
Returns:
Two [N, H, W, C] output activations from input images.
def init(images, num_channels, dim='2d', stride=2,
kernel_size=7, maxpool=True, training=True, scope='init'):
"""Standard ResNet initial block used as first RevNet block.
Args:
images: [N, H, W, 3] tensor of input images to the model.
num_channels: Output depth of convolutional layer in initial block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: stride for the convolution and pool layer.
kernel_size: Size of the initial convolution filter
maxpool: If true, apply a maxpool after the convolution
training: True for train phase, False for eval phase.
scope: Optional scope for the init block.
Returns:
Two [N, H, W, C] output activations from input images.
"""
conv = CONFIG[dim]['conv']
pool = CONFIG[dim]['max_pool']
with tf.variable_scope(scope):
net = conv(images, num_channels, kernel_size, strides=stride,
padding='SAME', activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
if maxpool:
net = pool(net, pool_size=3, strides=stride)
x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis'])
return x1, x2
|
Implements bottleneck RevNet unit from authors' RevNet architecture.
Args:
x1: [N, H, W, C] tensor of network activations.
x2: [N, H, W, C] tensor of network activations.
block_num: integer ID of block
depth: First depth in bottleneck residual unit.
num_layers: Number of layers in the RevNet block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
bottleneck: Should a bottleneck layer be used.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the residual function.
training: True for train phase, False for eval phase.
Returns:
Two [N, H, W, C] output activation tensors.
def unit(x1, x2, block_num, depth, num_layers, dim='2d',
bottleneck=True, first_batch_norm=True, stride=1, training=True):
"""Implements bottleneck RevNet unit from authors' RevNet architecture.
Args:
x1: [N, H, W, C] tensor of network activations.
x2: [N, H, W, C] tensor of network activations.
block_num: integer ID of block
depth: First depth in bottleneck residual unit.
num_layers: Number of layers in the RevNet block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
bottleneck: Should a bottleneck layer be used.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the residual function.
training: True for train phase, False for eval phase.
Returns:
Two [N, H, W, C] output activation tensors.
"""
scope_name = 'unit_%d' % block_num
if bottleneck:
depth1 = depth
depth2 = depth * 4
else:
depth1 = depth2 = depth
residual = wrapped_partial(f,
depth1=depth1, depth2=depth2, dim=dim,
training=training, bottleneck=bottleneck)
with tf.variable_scope(scope_name):
downsample = downsample_bottleneck if bottleneck else downsample_residual
# Manual implementation of downsampling
with tf.variable_scope('downsampling'):
with tf.variable_scope('x1'):
hx1 = downsample(x1, depth2, dim=dim, stride=stride)
fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm)
x1 = hx1 + fx2
with tf.variable_scope('x2'):
hx2 = downsample(x2, depth2, dim=dim, stride=stride)
fx1 = residual(x1)
x2 = hx2 + fx1
# Full block using memory-efficient rev_block implementation.
with tf.variable_scope('full_block'):
x1, x2 = tf.contrib.layers.rev_block(x1, x2,
residual,
residual,
num_layers=num_layers)
return x1, x2
|
Converts activations from last RevNet block to pre-logits.
Args:
x1: [NxHxWxC] tensor of network activations.
x2: [NxHxWxC] tensor of network activations.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
training: True for train phase, False for eval phase.
scope: Optional variable scope for the final block.
Returns:
[N, hidden_dim] pre-logits tensor from activations x1 and x2.
def final_block(x1, x2, dim='2d', training=True, scope='final_block'):
"""Converts activations from last RevNet block to pre-logits.
Args:
x1: [NxHxWxC] tensor of network activations.
x2: [NxHxWxC] tensor of network activations.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
training: True for train phase, False for eval phase.
scope: Optional variable scope for the final block.
Returns:
[N, hidden_dim] pre-logits tensor from activations x1 and x2.
"""
# Final batch norm and relu
with tf.variable_scope(scope):
y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])
y = tf.layers.batch_normalization(y, training=training)
y = tf.nn.relu(y)
# Global average pooling
net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],
name='final_pool', keep_dims=True)
return net
|
Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
def revnet(inputs, hparams, reuse=None):
"""Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
"""
training = hparams.mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('RevNet', reuse=reuse):
x1, x2 = init(inputs,
num_channels=hparams.num_channels_init_block,
dim=hparams.dim,
kernel_size=hparams.init_kernel_size,
maxpool=hparams.init_maxpool,
stride=hparams.init_stride,
training=training)
for block_num in range(len(hparams.num_layers_per_block)):
block = {'depth': hparams.num_channels[block_num],
'num_layers': hparams.num_layers_per_block[block_num],
'first_batch_norm': hparams.first_batch_norm[block_num],
'stride': hparams.strides[block_num],
'bottleneck': hparams.bottleneck}
x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training,
**block)
pre_logits = final_block(x1, x2, dim=hparams.dim, training=training)
return pre_logits
|
Default hparams for Revnet.
def revnet_base():
"""Default hparams for Revnet."""
hparams = common_hparams.basic_params1()
hparams.add_hparam('num_channels', [64, 128, 256, 416])
hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1])
hparams.add_hparam('bottleneck', True)
hparams.add_hparam('first_batch_norm', [False, True, True, True])
hparams.add_hparam('init_stride', 2)
hparams.add_hparam('init_kernel_size', 7)
hparams.add_hparam('init_maxpool', True)
hparams.add_hparam('strides', [1, 2, 2, 2])
hparams.add_hparam('num_channels_init_block', 64)
hparams.add_hparam('dim', '2d')
# Variable init
hparams.initializer = 'normal_unit_scaling'
hparams.initializer_gain = 2.
# Optimization
hparams.optimizer = 'Momentum'
hparams.optimizer_momentum_momentum = 0.9
hparams.optimizer_momentum_nesterov = True
hparams.weight_decay = 1e-4
hparams.clip_grad_norm = 0.0
# (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)
hparams.learning_rate = 0.4
hparams.learning_rate_decay_scheme = 'cosine'
# For image_imagenet224, 120k training steps, which effectively makes this a
# cosine decay (i.e. no cycles).
hparams.learning_rate_cosine_cycle_steps = 120000
# Can run with a batch size of 128 with Problem ImageImagenet224
hparams.batch_size = 128
return hparams
|
Tiny hparams suitable for CIFAR/etc.
def revnet_cifar_base():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
return hparams
|
Tiny hparams suitable for CIFAR/etc.
def revnet_110_cifar():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_cifar_base()
hparams.bottleneck = False
hparams.num_channels = [16, 32, 64]
hparams.num_layers_per_block = [8, 8, 8]
return hparams
|
Tiny hparams suitable for CIFAR/etc.
def revnet_164_cifar():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_cifar_base()
hparams.bottleneck = True
hparams.num_channels = [16, 32, 64]
hparams.num_layers_per_block = [8, 8, 8]
return hparams
|
Hyperparameters for tuning revnet.
def revnet_range(rhp):
"""Hyperparameters for tuning revnet."""
rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE)
rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE)
rhp.set_discrete('num_channels_init_block', [64, 128])
return rhp
|
Basic 2-frame conv model.
def next_frame_basic_deterministic():
"""Basic 2-frame conv model."""
hparams = base.next_frame_base()
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 1
hparams.hidden_size = 64
hparams.batch_size = 4
hparams.num_hidden_layers = 2
hparams.optimizer = "Adafactor"
hparams.learning_rate_constant = 1.5
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate_schedule = "linear_warmup * constant * rsqrt_decay"
hparams.label_smoothing = 0.0
hparams.initializer = "uniform_unit_scaling"
hparams.initializer_gain = 1.3
hparams.weight_decay = 0.0
hparams.clip_grad_norm = 1.0
hparams.dropout = 0.1
hparams.add_hparam("residual_dropout", 0.5)
hparams.add_hparam("num_compress_steps", 6)
hparams.add_hparam("filter_double_steps", 2)
hparams.add_hparam("pixel_sampling_temperature", 0.0)
hparams.add_hparam("concat_internal_states", False)
hparams.add_hparam("do_autoregressive_rnn", False)
hparams.add_hparam("autoregressive_rnn_lookback", 8)
hparams.add_hparam("autoregressive_rnn_warmup_steps", 8000)
hparams.add_hparam("activation_fn", "relu")
hparams.bottom["inputs"] = modalities.video_identity_bottom
hparams.bottom["targets"] = modalities.video_identity_bottom
return hparams
|
Basic 2-frame conv model with pixel noise.
def next_frame_pixel_noise():
"""Basic 2-frame conv model with pixel noise."""
hparams = next_frame_basic_deterministic()
hparams.add_hparam("video_modality_input_noise", 0.05)
hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom
hparams.top["inputs"] = modalities.video_top
return hparams
|
Basic conv model with scheduled sampling.
def next_frame_sampling():
"""Basic conv model with scheduled sampling."""
hparams = next_frame_basic_deterministic()
hparams.scheduled_sampling_mode = "prob_inverse_exp"
hparams.scheduled_sampling_max_prob = 1.0
hparams.scheduled_sampling_decay_steps = 10000
return hparams
|
Conv autoencoder.
def next_frame_ae():
"""Conv autoencoder."""
hparams = next_frame_basic_deterministic()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.hidden_size = 256
hparams.batch_size = 8
hparams.num_hidden_layers = 4
hparams.num_compress_steps = 4
hparams.dropout = 0.4
return hparams
|
Conv autoencoder, tiny set for testing.
def next_frame_ae_tiny():
"""Conv autoencoder, tiny set for testing."""
hparams = next_frame_tiny()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.batch_size = 8
hparams.dropout = 0.4
return hparams
|
Tiny for testing.
def next_frame_tiny():
"""Tiny for testing."""
hparams = next_frame_basic_deterministic()
hparams.hidden_size = 32
hparams.num_hidden_layers = 1
hparams.num_compress_steps = 2
hparams.filter_double_steps = 1
return hparams
|
Basic conv model with L1 modality.
def next_frame_l1():
"""Basic conv model with L1 modality."""
hparams = next_frame_basic_deterministic()
hparams.loss["targets"] = modalities.video_l1_loss
hparams.top["targets"] = modalities.video_l1_top
hparams.video_modality_loss_cutoff = 2.4
return hparams
|
Basic conv model with L2 modality.
def next_frame_l2():
"""Basic conv model with L2 modality."""
hparams = next_frame_basic_deterministic()
hparams.loss["targets"] = modalities.video_l2_loss
hparams.top["targets"] = modalities.video_l1_top
hparams.video_modality_loss_cutoff = 2.4
return hparams
|
Basic tuning grid.
def next_frame_base_range(rhp):
"""Basic tuning grid."""
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_discrete("hidden_size", [64, 128, 256])
rhp.set_int("num_compress_steps", 5, 8)
rhp.set_discrete("batch_size", [4, 8, 16, 32])
rhp.set_int("num_hidden_layers", 1, 3)
rhp.set_int("filter_double_steps", 1, 6)
rhp.set_float("learning_rate_constant", 1., 4.)
rhp.set_int("learning_rate_warmup_steps", 500, 3000)
rhp.set_float("initializer_gain", 0.8, 1.8)
|
Autoencoder world model tuning grid.
def next_frame_ae_range(rhp):
"""Autoencoder world model tuning grid."""
rhp.set_float("dropout", 0.3, 0.5)
rhp.set_int("num_compress_steps", 1, 3)
rhp.set_int("num_hidden_layers", 2, 6)
rhp.set_float("learning_rate_constant", 1., 2.)
rhp.set_float("initializer_gain", 0.8, 1.5)
rhp.set_int("filter_double_steps", 2, 3)
|
Series of architectures for language modeling.
def mqp_lm1b_base():
"""Series of architectures for language modeling."""
hparams = mtf_transformer2.mtf_unitransformer_base()
hparams.d_model = 1024
hparams.max_length = 256
hparams.batch_size = 256
# Parameters for my_layer_stack()
hparams.num_hidden_layers = 6
hparams.d_ff = 8192
hparams.d_kv = 128
hparams.num_heads = 8
hparams.learning_rate_decay_steps = 13600
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
return hparams
|
Initializes env_specs using the appropriate env.
def initialize_env_specs(hparams, env_problem_name):
"""Initializes env_specs using the appropriate env."""
if env_problem_name:
env = registry.env_problem(env_problem_name, batch_size=hparams.batch_size)
else:
env = rl_utils.setup_env(hparams, hparams.batch_size,
hparams.eval_max_num_noops,
hparams.rl_env_max_episode_steps,
env_name=hparams.rl_env_name)
env.start_new_epoch(0)
return rl.make_real_env_fn(env)
|
Train.
def train(hparams, output_dir, env_problem_name, report_fn=None):
"""Train."""
env_fn = initialize_env_specs(hparams, env_problem_name)
tf.logging.vlog(1, "HParams in trainer_model_free.train : %s",
misc_utils.pprint_hparams(hparams))
tf.logging.vlog(1, "Using hparams.base_algo: %s", hparams.base_algo)
learner = rl_utils.LEARNERS[hparams.base_algo](
hparams.frame_stack_size, output_dir, output_dir, total_num_epochs=1
)
policy_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
rl_utils.update_hparams_from_hparams(
policy_hparams, hparams, hparams.base_algo + "_"
)
tf.logging.vlog(1, "Policy HParams : %s",
misc_utils.pprint_hparams(policy_hparams))
# TODO(konradczechowski): remove the base_algo dependence once the evaluation
# method is decided
if hparams.base_algo == "ppo":
total_steps = policy_hparams.epochs_num
tf.logging.vlog(2, "total_steps: %d", total_steps)
eval_every_epochs = policy_hparams.eval_every_epochs
tf.logging.vlog(2, "eval_every_epochs: %d", eval_every_epochs)
if eval_every_epochs == 0:
eval_every_epochs = total_steps
policy_hparams.eval_every_epochs = 0
metric_name = rl_utils.get_metric_name(
sampling_temp=hparams.eval_sampling_temps[0],
max_num_noops=hparams.eval_max_num_noops,
clipped=False
)
tf.logging.vlog(1, "metric_name: %s", metric_name)
eval_metrics_dir = os.path.join(output_dir, "eval_metrics")
eval_metrics_dir = os.path.expanduser(eval_metrics_dir)
tf.gfile.MakeDirs(eval_metrics_dir)
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
def evaluate_on_new_model(model_dir_path):
global step
eval_metrics = rl_utils.evaluate_all_configs(hparams, model_dir_path)
tf.logging.info(
"Agent eval metrics:\n{}".format(pprint.pformat(eval_metrics)))
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, step)
if report_fn:
report_fn(eval_metrics[metric_name], step)
step += 1
policy_hparams.epochs_num = total_steps
policy_hparams.save_models_every_epochs = eval_every_epochs
else:
def evaluate_on_new_model(model_dir_path):
del model_dir_path
raise NotImplementedError(
"This function is currently implemented only for ppo")
learner.train(env_fn,
policy_hparams,
simulated=False,
save_continuously=True,
epoch=0,
model_save_fn=evaluate_on_new_model)
|
Compute the designated learning rate factor from hparams.
def learning_rate_factor(name, step_num, hparams):
"""Compute the designated learning rate factor from hparams."""
if name == "constant":
tf.logging.info("Base learning rate: %f", hparams.learning_rate_constant)
return hparams.learning_rate_constant
elif name == "linear_warmup":
return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps)
elif name == "linear_decay":
ret = (hparams.train_steps - step_num) / hparams.learning_rate_decay_steps
return tf.minimum(1.0, tf.maximum(0.0, ret))
elif name == "cosdecay": # openai gpt
in_warmup = tf.cast(step_num <= hparams.learning_rate_warmup_steps,
dtype=tf.float32)
ret = 0.5 * (1 + tf.cos(
np.pi * step_num / hparams.learning_rate_decay_steps))
# if in warmup stage return 1 else return the decayed value
return in_warmup * 1 + (1 - in_warmup) * ret
elif name == "single_cycle_cos_decay":
# Cosine decay to zero with a single cycle. This is different from
# "cosdecay" because it starts at 1 when the warmup steps end.
x = tf.maximum(step_num, hparams.learning_rate_warmup_steps)
step = x - hparams.learning_rate_warmup_steps
return tf.math.cos(
step * np.pi / hparams.learning_rate_decay_steps) / 2.0 + 0.5
elif name == "rsqrt_decay":
return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps))
elif name == "rsqrt_normalized_decay":
scale = tf.sqrt(tf.to_float(hparams.learning_rate_warmup_steps))
return scale * tf.rsqrt(tf.maximum(
step_num, hparams.learning_rate_warmup_steps))
elif name == "exp_decay":
decay_steps = hparams.learning_rate_decay_steps
warmup_steps = hparams.learning_rate_warmup_steps
p = (step_num - warmup_steps) / decay_steps
p = tf.maximum(p, 0.)
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
elif name == "rsqrt_hidden_size":
return hparams.hidden_size ** -0.5
elif name == "legacy":
return legacy_learning_rate_schedule(hparams)
else:
raise ValueError("unknown learning rate factor %s" % name)
|
Learning rate schedule based on hparams.
def learning_rate_schedule(hparams):
"""Learning rate schedule based on hparams."""
mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True)
mlperf_log.transformer_print(
key=mlperf_log.OPT_LR_WARMUP_STEPS,
value=hparams.learning_rate_warmup_steps)
step_num = _global_step(hparams)
schedule_string = hparams.learning_rate_schedule
names = schedule_string.split("*")
names = [name.strip() for name in names if name.strip()]
ret = tf.constant(1.0)
for name in names:
ret *= learning_rate_factor(name, step_num, hparams)
return ret
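A hedged numeric sketch (not from the source) of how a factored schedule string such as "constant * linear_warmup * rsqrt_decay" composes, written with plain Python floats instead of the TF graph ops above; the constant and warmup values are arbitrary assumptions.
import math
def sketch_lr(step_num, constant=2.0, warmup_steps=8000):
    linear_warmup = min(1.0, step_num / warmup_steps)
    rsqrt_decay = 1.0 / math.sqrt(max(step_num, warmup_steps))
    return constant * linear_warmup * rsqrt_decay
for s in (100, 8000, 100000):
    print(s, sketch_lr(s))                # rises linearly during warmup, then decays as 1/sqrt(step)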
|
Backwards-compatible learning-rate schedule.
def legacy_learning_rate_schedule(hparams):
"""Backwards-compatible learning-rate schedule."""
step_num = _global_step(hparams)
warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps)
if hparams.learning_rate_decay_scheme == "noam":
ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum(
(step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5)
else:
warmup_steps = hparams.learning_rate_warmup_steps
warmup = _learning_rate_warmup(warmup_steps, hparams=hparams)
decay = _learning_rate_decay(hparams, warmup_steps)
ret = tf.where(step_num < warmup_steps, warmup, decay)
optimizer_correction = 0.002 if "adam" in hparams.optimizer else 1.0
tf.logging.info("Base learning rate: %f", hparams.learning_rate)
return ret * optimizer_correction * hparams.learning_rate
|
Adjust global step if a multi-step optimizer is used.
def _global_step(hparams):
"""Adjust global step if a multi-step optimizer is used."""
step = tf.to_float(tf.train.get_or_create_global_step())
multiplier = hparams.optimizer_multistep_accumulate_steps
if not multiplier:
return step
tf.logging.info("Dividing global step by %d for multi-step optimizer."
% multiplier)
return step / tf.to_float(multiplier)
|
Scale learning rate according to the given schedule.
Multipliers are not cumulative.
Args:
step: global step
boundaries: List of steps to transition on.
values: Multiplier to apply at each boundary transition.
Returns:
Scaled value for the learning rate.
def _piecewise_learning_rate(step, boundaries, values):
"""Scale learning rate according to the given schedule.
Multipliers are not cumulative.
Args:
step: global step
boundaries: List of steps to transition on.
values: Multiplier to apply at each boundary transition.
Returns:
Scaled value for the learning rate.
"""
values = [1.0] + values
boundaries = [float(x) for x in boundaries]
return tf.train.piecewise_constant(
step, boundaries, values, name="piecewise_lr")
|
Learning rate decay multiplier.
def _learning_rate_decay(hparams, warmup_steps=0):
"""Learning rate decay multiplier."""
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = _global_step(hparams)
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
# Cycle the rate linearly by 10x every warmup_steps, up and down.
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float( # Normalize to the interval [-1, 1].
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme)
|
Learning rate warmup multiplier.
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None):
"""Learning rate warmup multiplier."""
if not warmup_steps:
return tf.constant(1.)
tf.logging.info("Applying %s learning rate warmup for %d steps",
warmup_schedule, warmup_steps)
warmup_steps = tf.to_float(warmup_steps)
global_step = _global_step(hparams)
if warmup_schedule == "exp":
return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
else:
assert warmup_schedule == "linear"
start = tf.constant(0.35)
return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
|
Returns True if `find` is a subtree of `expr`.
def is_in_expr(expr, find):
"""Returns True if `find` is a subtree of `expr`."""
return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find))
|
Generate a random expression tree with a required variable.
The required variable appears exactly once in the expression.
Args:
depth: At least one leaf will be this many levels down from the top.
required_var: A char. This char is guaranteed to be placed exactly once at
a leaf somewhere in the tree. This is the var to solve for.
optional_list: A list of chars. These chars are randomly selected as leaf
values. These are constant vars.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
def random_expr_with_required_var(depth, required_var, optional_list, ops):
"""Generate a random expression tree with a required variable.
The required variable appears exactly once in the expression.
Args:
depth: At least one leaf will be this many levels down from the top.
required_var: A char. This char is guaranteed to be placed exactly once at
a leaf somewhere in the tree. This is the var to solve for.
optional_list: A list of chars. These chars are randomly selected as leaf
values. These are constant vars.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
"""
if not depth:
if required_var:
return required_var
return str(optional_list[random.randrange(len(optional_list))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
required_var_side = random.randrange(2)
left = random_expr_with_required_var(
depth - 1 if max_depth_side else other_side_depth, required_var
if required_var_side else None, optional_list, ops)
right = random_expr_with_required_var(
depth - 1 if not max_depth_side else other_side_depth, required_var
if not required_var_side else None, optional_list, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
|
Generate a random expression tree.
Args:
depth: At least one leaf will be this many levels down from the top.
vlist: A list of chars. These chars are randomly selected as leaf values.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
def random_expr(depth, vlist, ops):
"""Generate a random expression tree.
Args:
depth: At least one leaf will be this many levels down from the top.
vlist: A list of chars. These chars are randomly selected as leaf values.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
"""
if not depth:
return str(vlist[random.randrange(len(vlist))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
left = random_expr(depth - 1
if max_depth_side else other_side_depth, vlist, ops)
right = random_expr(depth - 1
if not max_depth_side else other_side_depth, vlist, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
|
Solves for the value of the given var in an expression.
Args:
left: The root of the ExprNode tree on the left side of the equals sign.
right: The root of the ExprNode tree on the right side of the equals sign.
var: A char. The variable to solve for.
solve_ops: A dictionary with the following properties.
* For each operator in the expression, there is a rule that determines
how to cancel out a value either to the left or the right of that
operator.
* For each rule, there is an entry in the dictionary. The key is two
chars- the op char, and either 'l' or 'r' meaning rule for canceling
out the left or right sides. For example, '+l', '+r', '-l', '-r'.
* The value of each entry is a function with the following signature:
(left, right, to_tree) -> (new_from_tree, new_to_tree)
left- Expression on left side of the op.
right- Expression on the right side of the op.
to_tree- The tree on the other side of the equal sign. The canceled
out expression will be moved here.
new_from_tree- The resulting from_tree after the algebraic
manipulation.
new_to_tree- The resulting to_tree after the algebraic manipulation.
Returns:
The root of an ExprNode tree which holds the value of `var` after solving.
Raises:
ValueError: If `var` does not appear exactly once in the equation (which
includes the left and right sides).
def algebra_inverse_solve(left, right, var, solve_ops):
"""Solves for the value of the given var in an expression.
Args:
left: The root of the ExprNode tree on the left side of the equals sign.
right: The root of the ExprNode tree on the right side of the equals sign.
var: A char. The variable to solve for.
solve_ops: A dictionary with the following properties.
* For each operator in the expression, there is a rule that determines
how to cancel out a value either to the left or the right of that
operator.
* For each rule, there is an entry in the dictionary. The key is two
chars- the op char, and either 'l' or 'r' meaning rule for canceling
out the left or right sides. For example, '+l', '+r', '-l', '-r'.
* The value of each entry is a function with the following signature:
(left, right, to_tree) -> (new_from_tree, new_to_tree)
left- Expression on left side of the op.
right- Expression on the right side of the op.
to_tree- The tree on the other side of the equal sign. The canceled
out expression will be moved here.
new_from_tree- The resulting from_tree after the algebraic
manipulation.
new_to_tree- The resulting to_tree after the algebraic manipulation.
Returns:
The root of an ExprNode tree which holds the value of `var` after solving.
Raises:
ValueError: If `var` does not appear exactly once in the equation (which
includes the left and right sides).
"""
is_in_left = is_in_expr(left, var)
is_in_right = is_in_expr(right, var)
if is_in_left == is_in_right:
if is_in_left:
raise ValueError("Solve-variable '%s' is on both sides of the equation. "
"Only equations where the solve variable-appears once "
"are supported by this solver. Left: '%s', right: '%s'" %
(var, str(left), str(right)))
else:
raise ValueError("Solve-variable '%s' is not present in the equation. It "
"must appear once. Left: '%s', right: '%s'" %
(var, str(left), str(right)))
from_tree = left if is_in_left else right
to_tree = left if not is_in_left else right
while from_tree != var:
is_in_left = is_in_expr(from_tree.left, var)
is_in_right = is_in_expr(from_tree.right, var)
from_tree, to_tree = (solve_ops[str(from_tree.op)
+ ("l" if is_in_left else "r")](
from_tree.left, from_tree.right,
to_tree))
return to_tree
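A hedged usage sketch, assuming ExprNode, math_dataset_init (defined further below) and algebra_inverse_solve are in scope; it solves the equation a+b = c for a:
cfg = math_dataset_init(alphabet_size=3)    # vlist is ['a', 'b', 'c']
left = ExprNode("a", "b", cfg.ops["+"])     # the expression a+b
right = "c"
solved = algebra_inverse_solve(left, right, "a", cfg.solve_ops)
print(str(solved))                          # expected: c-b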
|
Convert sympy expression into a string which can be encoded.
Args:
sympy_expr: Any sympy expression tree or string.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
A string representation of the expression suitable for encoding as a
sequence input.
def format_sympy_expr(sympy_expr, functions=None):
"""Convert sympy expression into a string which can be encoded.
Args:
sympy_expr: Any sympy expression tree or string.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
A string representation of the expression suitable for encoding as a
sequence input.
"""
if functions is None:
functions = {}
str_expr = str(sympy_expr)
result = str_expr.replace(" ", "")
for fn_name, char in six.iteritems(functions):
result = result.replace(fn_name, char)
return result
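A small sketch of the substitution above, assuming format_sympy_expr is in scope; the {"log": "L"} mapping mirrors the one used by calculus_integrate further down (the exact term order comes from sympy's printer):
import sympy
x = sympy.Symbol("x")
print(format_sympy_expr(sympy.log(x) + 2 * x, functions={"log": "L"}))
# e.g. 2*x+L(x)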
|
Randomly generate an algebra inverse dataset sample.
Given an input equation and variable, produce the expression equal to the
variable.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input. Will be of the form
'solve_var:left_side=right_side'.
target: String representation of the solution.
def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth,
max_depth):
"""Randomly generate an algebra inverse dataset sample.
Given an input equation and variable, produce the expression equal to the
variable.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input. Will be of the form
'solve_var:left_side=right_side'.
target: String representation of the solution.
"""
side = random.randrange(2)
left_depth = random.randrange(min_depth if side else 0, max_depth + 1)
right_depth = random.randrange(min_depth if not side else 0, max_depth + 1)
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = vlist[:var_index] + vlist[var_index + 1:]
left = random_expr_with_required_var(left_depth, var
if side else None, consts, ops)
right = random_expr_with_required_var(right_depth, var
if not side else None, consts, ops)
left_str = str(left)
right_str = str(right)
target = str(algebra_inverse_solve(left, right, var, solve_ops))
sample = "%s:%s=%s" % (var, left_str, right_str)
return sample, target
|
Randomly generate an algebra simplify dataset sample.
Given an input expression, produce the simplified expression.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input.
target: String representation of the solution.
def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth):
"""Randomly generate an algebra simplify dataset sample.
Given an input expression, produce the simplified expression.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input.
target: String representation of the solution.
"""
depth = random.randrange(min_depth, max_depth + 1)
expr = random_expr(depth, vlist, ops)
sample = str(expr)
target = format_sympy_expr(sympy.simplify(sample))
return sample, target
|
Randomly generate a symbolic integral dataset sample.
Given an input expression, produce the indefinite integral.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
sample: String representation of the input. Will be of the form
'var:expression'.
target: String representation of the solution.
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth,
functions):
"""Randomly generate a symbolic integral dataset sample.
Given an input expression, produce the indefinite integral.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
sample: String representation of the input. Will be of the form
'var:expression'.
target: String representation of the solution.
"""
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = vlist[:var_index] + vlist[var_index + 1:]
depth = random.randrange(min_depth, max_depth + 1)
expr = random_expr_with_required_var(depth, var, consts, ops)
expr_str = str(expr)
sample = var + ":" + expr_str
target = format_sympy_expr(
sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions)
return sample, target
|
Initializes required objects to generate symbolic math datasets.
Produces token set, ExprOp instances, solve_op dictionary, encoders, and
decoders needed to generate the algebra inverse dataset.
Args:
alphabet_size: How many possible variables there are. Max 52.
digits: How many numerical digits to encode as tokens, "0" through
str(digits-1), or None to encode no digits.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
WARNING: Make sure these tokens do not conflict with the list of
possible variable names.
Returns:
AlgebraConfig instance holding all the objects listed above.
Raises:
ValueError: If `alphabet_size` is not in range [2, 52].
def math_dataset_init(alphabet_size=26, digits=None, functions=None):
"""Initializes required objects to generate symbolic math datasets.
Produces token set, ExprOp instances, solve_op dictionary, encoders, and
decoders needed to generate the algebra inverse dataset.
Args:
alphabet_size: How many possible variables there are. Max 52.
digits: How many numerical digits to encode as tokens, "0" through
str(digits-1), or None to encode no digits.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
WARNING: Make sure these tokens do not conflict with the list of
possible variable names.
Returns:
AlgebraConfig instance holding all the objects listed above.
Raises:
ValueError: If `alphabet_size` is not in range [2, 52].
"""
ops_list = ["+", "-", "*", "/"]
ops = {
"+": ExprOp("+", 0, True),
"-": ExprOp("-", 0, False),
"*": ExprOp("*", 1, True),
"/": ExprOp("/", 1, False)
}
solve_ops = {
"+l": lambda l, r, to: (l, ExprNode(to, r, ops["-"])),
"+r": lambda l, r, to: (r, ExprNode(to, l, ops["-"])),
"-l": lambda l, r, to: (l, ExprNode(to, r, ops["+"])),
"-r": lambda l, r, to: (r, ExprNode(l, to, ops["-"])),
"*l": lambda l, r, to: (l, ExprNode(to, r, ops["/"])),
"*r": lambda l, r, to: (r, ExprNode(to, l, ops["/"])),
"/l": lambda l, r, to: (l, ExprNode(to, r, ops["*"])),
"/r": lambda l, r, to: (r, ExprNode(l, to, ops["/"])),
}
alphabet = (
[six.int2byte(ord("a") + c).decode("utf-8") for c in range(26)] +
[six.int2byte(ord("A") + c).decode("utf-8") for c in range(26)])
if alphabet_size > 52:
raise ValueError(
"alphabet_size cannot be greater than 52. Got %s." % alphabet_size)
if alphabet_size < 2:
raise ValueError(
"alphabet_size cannot be less than 2. Got %s." % alphabet_size)
if digits is not None and not 1 <= digits <= 10:
raise ValueError("digits cannot must be between 1 and 10. Got %s." % digits)
vlist = alphabet[:alphabet_size]
if digits is not None:
dlist = [str(d) for d in range(digits)]
else:
dlist = []
if functions is None:
functions = {}
flist = sorted(functions.values())
pad = "_"
tokens = [pad] + [":", "(", ")", "="] + ops_list + vlist + dlist + flist
if len(tokens) != len(set(tokens)):
raise ValueError("Duplicate token. Tokens: %s" % tokens)
token_map = dict([(t, i) for i, t in enumerate(tokens)])
def int_encoder(sequence):
return [token_map[s] for s in sequence]
def int_decoder(tensor_1d):
return "".join([tokens[i] for i in tensor_1d])
return AlgebraConfig(
vlist=vlist,
dlist=dlist,
flist=flist,
functions=functions,
ops=ops,
solve_ops=solve_ops,
int_encoder=int_encoder,
int_decoder=int_decoder)
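A minimal round-trip sketch for the integer encoder/decoder built above, assuming math_dataset_init is in scope:
cfg = math_dataset_init(alphabet_size=4, digits=3)
ids = cfg.int_encoder("a:b+2=c")   # one int per character token
print(ids)
print(cfg.int_decoder(ids))        # a:b+2=c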
|
Generate the algebra inverse dataset.
Each sample is a symbolic math equation involving unknown variables. The
task is to solve for the given variable. The target is the resulting
expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to solve for and the math
equation, and target-list is a list of tokens encoding the resulting math
expression after solving for the variable.
Raises:
ValueError: If `max_depth` < `min_depth`.
def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2,
nbr_cases=10000):
"""Generate the algebra inverse dataset.
Each sample is a symbolic math equation involving unknown variables. The
task is to solve for the given variable. The target is the resulting
expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to solve for and the math
equation, and target-list is a list of tokens encoding the resulting math
expression after solving for the variable.
Raises:
ValueError: If `max_depth` < `min_depth`.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
alg_cfg = math_dataset_init(alphabet_size)
for _ in range(nbr_cases):
sample, target = generate_algebra_inverse_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
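A hedged usage sketch, assuming algebra_inverse and math_dataset_init are in scope; a config built with the same alphabet_size can decode the generated token ids back to text:
cfg = math_dataset_init(alphabet_size=26)
for case in algebra_inverse(alphabet_size=26, min_depth=1, max_depth=2,
                            nbr_cases=3):
  print(cfg.int_decoder(case["inputs"]), "->", cfg.int_decoder(case["targets"]))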
|
Generate the algebra simplify dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to simplify the expression. The target is the resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the generated expression tree.
max_depth: Maximum depth of the generated expression tree.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the expression to simplify, and
target-list is a list of tokens encoding the resulting math expression after
simplifying.
Raises:
ValueError: If `max_depth` < `min_depth`.
def algebra_simplify(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the algebra simplify dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to simplify the expression. The target is the resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the generated expression tree.
max_depth: Maximum depth of the generated expression tree.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the expression to simplify, and
target-list is a list of tokens encoding the resulting math expression after
simplifying.
Raises:
ValueError: If `max_depth` < `min_depth`.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
alg_cfg = math_dataset_init(alphabet_size, digits=5)
for _ in range(nbr_cases):
sample, target = generate_algebra_simplify_sample(
alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
|
Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the generated expression tree.
max_depth: Maximum depth of the generated expression tree.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
def calculus_integrate(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the generated expression tree.
max_depth: Maximum depth of the generated expression tree.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
# Don't allow alphabet to use capital letters. Those are reserved for function
# names.
if alphabet_size > 26:
raise ValueError(
"alphabet_size must not be greater than 26. Got %s." % alphabet_size)
functions = {"log": "L"}
alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions)
nbr_case = 0
while nbr_case < nbr_cases:
try:
sample, target = generate_calculus_integrate_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
except: # pylint:disable=bare-except
continue
if nbr_case % 10000 == 0:
print(" calculus_integrate: generating case %d." % nbr_case)
nbr_case += 1
|
Returns True if `expr` is a subtree.
def is_in(self, expr):
"""Returns True if `expr` is a subtree."""
if expr == self:
return True
is_in_left = is_in_expr(self.left, expr)
is_in_right = is_in_expr(self.right, expr)
return is_in_left or is_in_right
|
Preprocessing steps common to all models.
def preprocess_example_common(example, mode, hparams):
"""Preprocessing steps common to all models."""
if "inputs" in example and hparams.max_input_seq_length > 0:
example["inputs"] = example["inputs"][:hparams.max_input_seq_length]
if hparams.prepend_mode != "none":
if mode == tf.estimator.ModeKeys.PREDICT:
example["partial_targets"] = tf.concat([example["inputs"], [0]], 0)
else:
example["targets"] = tf.concat(
[example["inputs"], [0], example["targets"]], 0)
if "targets" in example and hparams.max_target_seq_length > 0:
example["targets"] = example["targets"][:hparams.max_target_seq_length]
if hparams.split_to_length:
new_example = {}
for k, v in six.iteritems(example):
if k == "targets" or k == "inputs":
new_example[k] = tf.reshape(v, [-1, hparams.split_to_length, 1, 1])
else:
tf.logging.warning("Dropping feature %s" % k)
return tf.data.Dataset.from_tensor_slices(new_example)
return example
|
Use input modality, vocab, and space id for target.
def _copy_problem_hparams(p_hparams):
"""Use input modality, vocab, and space id for target."""
p = p_hparams
# Duplicate input modality.
p.modality["targets"] = p.modality["inputs"]
# Duplicate input vocab size.
p.vocab_size["targets"] = p.vocab_size["inputs"]
# Duplicate input vocabulary.
p.vocabulary["targets"] = p.vocabulary["inputs"]
# Duplicate input space ids.
p.target_space_id = p.input_space_id
# Mark that p was reversed.
p.was_copy = True
|
Swap input/output modalities, vocab, and space ids.
def _reverse_problem_hparams(p_hparams):
"""Swap input/output modalities, vocab, and space ids."""
p = p_hparams
# Swap modalities.
# TODO(trandustin): Note this assumes target modalities have feature name
# 'target', and each intended feature to swap has feature name 'input'.
# In the future, remove need for this behavior.
reversed_modality = {}
for feature_name in p.modality:
reversed_feature_name = feature_name.replace("target", "input")
if "target" in feature_name and reversed_feature_name in p.modality:
reversed_modality[feature_name] = p.modality[reversed_feature_name]
reversed_modality[reversed_feature_name] = p.modality[feature_name]
else:
reversed_modality[feature_name] = p.modality[feature_name]
p.modality = reversed_modality
# Swap vocab sizes.
reversed_vocab_size = {}
for feature_name in p.vocab_size:
reversed_feature_name = feature_name.replace("target", "input")
if "target" in feature_name and reversed_feature_name in p.vocab_size:
reversed_vocab_size[feature_name] = p.vocab_size[reversed_feature_name]
reversed_vocab_size[reversed_feature_name] = p.vocab_size[feature_name]
else:
reversed_vocab_size[feature_name] = p.vocab_size[feature_name]
p.vocab_size = reversed_vocab_size
# Swap vocabularies.
input_vocabulary = p.vocabulary.pop("inputs", None)
target_vocabulary = p.vocabulary.pop("targets", None)
if input_vocabulary is not None:
p.vocabulary["targets"] = input_vocabulary
if target_vocabulary is not None:
p.vocabulary["inputs"] = target_vocabulary
# Swap input/target space ids.
input_space_id = p.input_space_id
target_space_id = p.target_space_id
if input_space_id is not None:
p.target_space_id = input_space_id
else:
p.target_space_id = SpaceID.GENERIC
if target_space_id is not None:
p.input_space_id = target_space_id
else:
p.input_space_id = SpaceID.GENERIC
# Mark that p was reversed.
p.was_reversed = True
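A plain-Python sketch of the modality swap performed above, using made-up modality names to show how 'inputs' and 'targets' trade places:
modality = {"inputs": "symbol_in", "targets": "symbol_out"}
reversed_modality = {}
for feature_name in modality:
  reversed_feature_name = feature_name.replace("target", "input")
  if "target" in feature_name and reversed_feature_name in modality:
    reversed_modality[feature_name] = modality[reversed_feature_name]
    reversed_modality[reversed_feature_name] = modality[feature_name]
  else:
    reversed_modality[feature_name] = modality[feature_name]
print(reversed_modality)  # {'inputs': 'symbol_out', 'targets': 'symbol_in'}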
|
A set of basic model hyperparameters.
def _default_hparams():
"""A set of basic model hyperparameters."""
return hparam.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
# tokens in the test set according to the tokenization used to the number
# of tokens in the test set in the "official" tokenization. For
# example, if we are using a word-piece based model and we want to
# compute per-word perplexity, then we set loss_multiplier to the number
# of wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will
# be used to judge the sequence length.
batch_size_multiplier=1,
# During inference for autoregressive problems, if the batch_size is 1,
# the inference will stop when the model predicts a text_encoder.EOS_ID
# token.
stop_at_eos=False,
# Modalities used to map from features to a space compatible with
# chosen model architecture. It comprises key-value pairs of a feature
# name (str) and its modality type.
modality={},
vocab_size={},
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. Spaces defined as constants in SpaceID
# class.
input_space_id=SpaceID.GENERIC,
target_space_id=SpaceID.GENERIC)
|
Batch size in examples per TPU core.
Args:
model_hparams: model hyperparameters
Returns:
an integer
def tpu_batch_size_per_shard(self, model_hparams):
"""Batch size in examples per TPU core.
Args:
model_hparams: model hyperparameters
Returns:
an integer
"""
if self.batch_size_means_tokens and not model_hparams.use_fixed_batch_size:
return model_hparams.batch_size // self.max_length(model_hparams)
else:
return model_hparams.batch_size
|
Runtime preprocessing on the whole dataset.
Return a tf.data.Dataset -- the preprocessed version of the given one.
By default this function calls preprocess_example.
Args:
dataset: the Dataset of already decoded but not yet preprocessed features.
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
interleave: bool, whether to use parallel_interleave, which is faster
but will alter the order of samples non-deterministically, or flat_map,
which is slower but will preserve the sample order.
Returns:
a Dataset
def preprocess(self, dataset, mode, hparams, interleave=True):
"""Runtime preprocessing on the whole dataset.
Return a tf.data.Dataset -- the preprocessed version of the given one.
By default this function calls preprocess_example.
Args:
dataset: the Dataset of already decoded but not yet preprocessed features.
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
interleave: bool, whether to use parallel_interleave, which is faster
but will alter the order of samples non-deterministically, or flat_map,
which is slower but will preserve the sample order.
Returns:
a Dataset
"""
def _preprocess(example):
examples = self.preprocess_example(example, mode, hparams)
if not isinstance(examples, tf.data.Dataset):
examples = tf.data.Dataset.from_tensors(examples)
return examples
if interleave:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_preprocess, sloppy=True, cycle_length=8))
else:
dataset = dataset.flat_map(_preprocess)
return dataset
|
Get filepattern for data files for mode.
Matches mode to a suffix.
* DatasetSplit.TRAIN: train
* DatasetSplit.EVAL: dev
* DatasetSplit.TEST: test
* tf.estimator.ModeKeys.PREDICT: dev
Args:
data_dir: str, data directory.
mode: DatasetSplit
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
def filepattern(self, data_dir, mode, shard=None):
"""Get filepattern for data files for mode.
Matches mode to a suffix.
* DatasetSplit.TRAIN: train
* DatasetSplit.EVAL: dev
* DatasetSplit.TEST: test
* tf.estimator.ModeKeys.PREDICT: dev
Args:
data_dir: str, data directory.
mode: DatasetSplit
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
"""
path = os.path.join(data_dir, self.dataset_filename())
shard_str = "-%05d" % shard if shard is not None else ""
if mode == DatasetSplit.TRAIN:
suffix = "train"
elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:
suffix = "dev"
else:
assert mode == DatasetSplit.TEST
suffix = "test"
return "%s-%s%s*" % (path, suffix, shard_str)
|
Returns problem_hparams.
def get_hparams(self, model_hparams=None):
"""Returns problem_hparams."""
if self._hparams is not None:
return self._hparams
if model_hparams is None:
model_hparams = default_model_hparams()
if self._encoders is None:
data_dir = (model_hparams and hasattr(model_hparams, "data_dir") and
model_hparams.data_dir) or None
self.get_feature_encoders(data_dir)
hp = _default_hparams()
ret = self.hparams(hp, model_hparams)
if ret is not None:
raise ValueError("The Problem subclass hparams function should mutate "
"the defaults passed in and return None.")
hp.add_hparam("vocabulary", self._encoders)
hp.add_hparam("was_reversed", self._was_reversed)
hp.add_hparam("was_copy", self._was_copy)
if self._was_reversed:
_reverse_problem_hparams(hp)
if self._was_copy:
_copy_problem_hparams(hp)
self._hparams = hp
return self._hparams
|
Reverse features between inputs and targets if the problem is '_rev'.
def maybe_reverse_features(self, feature_map):
"""Reverse features between inputs and targets if the problem is '_rev'."""
if not self._was_reversed:
return
inputs = feature_map.pop("inputs", None)
targets = feature_map.pop("targets", None)
inputs_seg = feature_map.pop("inputs_segmentation", None)
targets_seg = feature_map.pop("targets_segmentation", None)
inputs_pos = feature_map.pop("inputs_position", None)
targets_pos = feature_map.pop("targets_position", None)
if inputs is not None:
feature_map["targets"] = inputs
if targets is not None:
feature_map["inputs"] = targets
if inputs_seg is not None:
feature_map["targets_segmentation"] = inputs_seg
if targets_seg is not None:
feature_map["inputs_segmentation"] = targets_seg
if inputs_pos is not None:
feature_map["targets_position"] = inputs_pos
if targets_pos is not None:
feature_map["inputs_position"] = targets_pos
|
Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
shuffle_buffer_size: if shuffle_files is True, this is the buffer size
used to shuffle records.
max_records: int, number of records to truncate to.
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
ValueError: if num_partitions is greater than the number of data files.
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None,
partition_id=0,
num_partitions=1,
shuffle_buffer_size=1024,
max_records=-1):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
shuffle_buffer_size: if shuffle_files is True, this is the buffer size
used to shuffle records.
max_records: int, number of records to truncate to.
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
ValueError: if num_partitions is greater than the number of data files.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
shuffle_files = shuffle_files or shuffle_files is None and is_training
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = sorted(tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern))
# Functions used in dataset transforms below. `filenames` can be either a
# `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.
def _load_records_and_preprocess(filenames):
"""Reads files from a string tensor or a dataset of filenames."""
# Load records from file(s) with an 8MiB read buffer.
dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)
# Decode.
dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)
# Preprocess if requested.
# Note that preprocessing should happen per-file as order may matter.
if preprocess:
dataset = self.preprocess(dataset, mode, hparams,
interleave=shuffle_files)
return dataset
if len(data_files) < num_partitions:
raise ValueError(
"number of data files (%d) must be at least the number of hosts (%d)"
% (len(data_files), num_partitions))
data_files = [f for (i, f) in enumerate(data_files)
if i % num_partitions == partition_id]
tf.logging.info(
"partition: %d num_data_files: %d" % (partition_id, len(data_files)))
if shuffle_files:
mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
random.shuffle(data_files)
dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))
# Create data-set from files by parsing, pre-processing and interleaving.
if shuffle_files:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_load_records_and_preprocess, sloppy=True, cycle_length=8))
else:
dataset = _load_records_and_preprocess(dataset)
dataset = dataset.map(
self.maybe_reverse_and_copy, num_parallel_calls=num_threads)
dataset = dataset.take(max_records)
## Shuffle records only for training examples.
if shuffle_files and is_training:
dataset = dataset.shuffle(shuffle_buffer_size)
if hparams.get("pack_dataset", False):
dataset = generator_utils.pack_dataset(
dataset, hparams.max_length, keys=["inputs", "targets"],
use_custom_ops=hparams.get("use_custom_ops", False))
if output_buffer_size:
dataset = dataset.prefetch(output_buffer_size)
return dataset
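A hedged usage sketch (TF1-style graph execution, as in the surrounding code); my_problem and the data_dir path are placeholders, not values from this codebase:
import tensorflow as tf
dataset = my_problem.dataset(       # my_problem: any Problem instance (placeholder)
    mode=tf.estimator.ModeKeys.TRAIN,
    data_dir="/tmp/t2t_data",       # placeholder path
    shuffle_buffer_size=512)
features = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
  print(sess.run(features))         # dict<feature name, numpy array>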
|
Return a dict of Tensors from a serialized tensorflow.Example.
def decode_example(self, serialized_example):
"""Return a dict of Tensors from a serialized tensorflow.Example."""
data_fields, data_items_to_decoders = self.example_reading_spec()
# Necessary to rejoin examples in the correct order with the Cloud ML Engine
# batch prediction API.
data_fields["batch_prediction_key"] = tf.FixedLenFeature([1], tf.int64, 0)
if data_items_to_decoders is None:
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(sorted(data_items_to_decoders))
decoded = decoder.decode(serialized_example, items=decode_items)
return dict(zip(decode_items, decoded))
|
Retrieve dict<feature name, FeatureInfo>.
Must first call Problem.get_hparams or Problem.dataset to have the problem's
internal hparams already constructed.
Returns:
dict<feature name, FeatureInfo>
def feature_info(self):
"""Retrieve dict<feature name, FeatureInfo>.
Must first call Problem.get_hparams or Problem.dataset to have the problem's
internal hparams already constructed.
Returns:
dict<feature name, FeatureInfo>
"""
if self._feature_info is not None:
return self._feature_info
assert self._hparams is not None
hp = self.get_hparams()
if self.has_inputs:
in_id = hp.input_space_id
out_id = hp.target_space_id
features = collections.defaultdict(FeatureInfo)
for feature_name, modality_cls in six.iteritems(hp.modality):
finfo = features[feature_name]
finfo.modality = modality_cls
finfo.vocab_size = hp.vocab_size[feature_name]
vocabs = hp.vocabulary
for name, encoder in six.iteritems(vocabs):
features[name].encoder = encoder
if self.has_inputs:
features["inputs"].space_id = in_id
features["targets"].space_id = out_id
self._feature_info = features
return features
|
Return input_fn wrapped for Estimator.
def make_estimator_input_fn(self,
mode,
hparams,
data_dir=None,
force_repeat=False,
prevent_repeat=False,
dataset_kwargs=None):
"""Return input_fn wrapped for Estimator."""
def estimator_input_fn(params, config):
return self.input_fn(
mode,
hparams,
data_dir=data_dir,
params=params,
config=config,
force_repeat=force_repeat,
prevent_repeat=prevent_repeat,
dataset_kwargs=dataset_kwargs)
return estimator_input_fn
|
Which part of the training data to read.
If there are multiple parallel calls to input_fn (multiple TPU hosts),
then we want each one to read from a separate partition of the training
data.
Args:
mode: tf.estimator.ModeKeys
config: RunConfig
params: A dict that contains parameters.
Returns:
partition_id: an integer
num_partitions: an integer
def _dataset_partition(self, mode, config, params):
"""Which part of the training data to read.
If there are multiple parallel calls to input_fn (multiple TPU hosts),
then we want each one to read from a separate partition of the training
data.
Args:
mode: tf.estimator.ModeKeys
config: RunConfig
params: A dict that contains parameters.
Returns:
partition_id: an integer
num_partitions: an integer
"""
if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, "tpu_config"):
# Reset in the case when using TPU but alternating TRAIN and EVAL.
self._next_partition_id = 0
return 0, 1
phift = config.tpu_config.per_host_input_for_training
# This is the mesh-tensorflow case.
if (hasattr(tpu_config.InputPipelineConfig, "BROADCAST") and
phift == tpu_config.InputPipelineConfig.BROADCAST):
return 0, 1
if phift:
num_hosts = (params["context"].num_hosts if "context" in params
else config.tpu_config.num_shards // 8)
num_partitions = max(num_hosts, 1)
else:
num_partitions = config.tpu_config.num_shards
partition_id = getattr(self, "_next_partition_id", 0)
self._next_partition_id = partition_id + 1
tf.logging.info("num_partitions = %d partition_id = %d" %
(num_partitions, partition_id))
assert partition_id < num_partitions
return partition_id, num_partitions
|
Builds input pipeline for problem.
Args:
mode: tf.estimator.ModeKeys
hparams: HParams, model hparams
data_dir: str, data directory; if None, will use hparams.data_dir
params: dict, may include "batch_size"
config: RunConfig; should have the data_parallelism attribute if not using
TPU
force_repeat: bool, whether to repeat the data even if not training
prevent_repeat: bool, whether to not repeat when in training mode.
Overrides force_repeat.
dataset_kwargs: dict, if passed, will pass as kwargs to self.dataset
method when called
Returns:
(features_dict<str name, Tensor feature>, Tensor targets)
def input_fn(self,
mode,
hparams,
data_dir=None,
params=None,
config=None,
force_repeat=False,
prevent_repeat=False,
dataset_kwargs=None):
"""Builds input pipeline for problem.
Args:
mode: tf.estimator.ModeKeys
hparams: HParams, model hparams
data_dir: str, data directory; if None, will use hparams.data_dir
params: dict, may include "batch_size"
config: RunConfig; should have the data_parallelism attribute if not using
TPU
force_repeat: bool, whether to repeat the data even if not training
prevent_repeat: bool, whether to not repeat when in training mode.
Overrides force_repeat.
dataset_kwargs: dict, if passed, will pass as kwargs to self.dataset
method when called
Returns:
(features_dict<str name, Tensor feature>, Tensor targets)
"""
partition_id, num_partitions = self._dataset_partition(mode, config, params)
is_training = mode == tf.estimator.ModeKeys.TRAIN
if config and config.use_tpu:
num_threads = 64
else:
num_threads = data_reader.cpu_count() if is_training else 1
data_dir = data_dir or (hasattr(hparams, "data_dir") and hparams.data_dir)
dataset_kwargs = dataset_kwargs or {}
dataset_kwargs.update({
"mode": mode,
"data_dir": data_dir,
"num_threads": num_threads,
"hparams": hparams,
"partition_id": partition_id,
"num_partitions": num_partitions,
})
return data_reader.input_fn(
self.dataset(**dataset_kwargs),
self.filepattern(data_dir, mode),
self.skip_random_fraction_when_training,
self.batch_size_means_tokens,
self.get_hparams().batch_size_multiplier,
self.max_length(hparams),
mode,
hparams,
data_dir=data_dir,
params=params,
config=config,
force_repeat=force_repeat,
prevent_repeat=prevent_repeat)
|
Input fn for serving export, starting from serialized example.
def serving_input_fn(self, hparams, decode_hparams=None, use_tpu=False):
"""Input fn for serving export, starting from serialized example."""
mode = tf.estimator.ModeKeys.PREDICT
serialized_example = tf.placeholder(
dtype=tf.string, shape=[None], name="serialized_example")
dataset = tf.data.Dataset.from_tensor_slices(serialized_example)
dataset = dataset.map(self.decode_example)
dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))
dataset = dataset.map(data_reader.cast_ints_to_int32)
if use_tpu:
padded_shapes = data_reader.pad_for_tpu(dataset.output_shapes, hparams,
hparams.max_length)
batch_size = 1 if not decode_hparams else getattr(decode_hparams,
"batch_size", 1)
dataset = dataset.padded_batch(
batch_size, padded_shapes, drop_remainder=False)
dataset = dataset.map(
functools.partial(data_reader.pad_batch, batch_multiple=batch_size))
else:
dataset = dataset.padded_batch(
tf.shape(serialized_example, out_type=tf.int64)[0],
dataset.output_shapes)
dataset = dataset.map(data_reader.standardize_shapes)
features = tf.data.experimental.get_single_element(dataset)
if self.has_inputs:
features.pop("targets", None)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
|
Get hyper-parameters file path.
def _get_hparams_path():
"""Get hyper-parameters file path."""
hparams_path = None
if FLAGS.output_dir:
hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
else:
tf.logging.warning(
"--output_dir not specified. Hyper-parameters will be infered from"
"--hparams_set and --hparams only. These may not match training time"
"hyper-parameters.")
return hparams_path
|
Exports given checkpoint as tfhub module with given spec.
def export_module_spec_with_checkpoint(module_spec,
checkpoint_path,
export_path,
scope_prefix=""):
"""Exports given checkpoint as tfhub module with given spec."""
# The main requirement is that it is possible to know how to map from
# module variable name to checkpoint variable name.
# This is trivial if the original code used variable scopes,
# but can be messy if the variables to export are intertwined
# with variables that are not being exported.
with tf.Graph().as_default():
m = hub.Module(module_spec)
assign_map = {
scope_prefix + name: value for name, value in m.variable_map.items()
}
tf.train.init_from_checkpoint(checkpoint_path, assign_map)
init_op = tf.initializers.global_variables()
with tf.Session() as session:
session.run(init_op)
m.export(export_path, session)
|
Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the name of the problem
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
def export_as_tfhub_module(model_name,
hparams,
decode_hparams,
problem,
checkpoint_path,
export_dir):
"""Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the name of the problem
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
"""
def hub_module_fn():
"""Creates the TF graph for the hub module."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name,
hparams,
decode_hparams=decode_hparams,
use_tpu=FLAGS.use_tpu)
features = problem.serving_input_fn(
hparams, decode_hparams, use_tpu=FLAGS.use_tpu).features
# We must copy the features dict, as the model_fn can add additional
# entries to it (like hyperparameter settings, etc.).
original_features = features.copy()
spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)
hub.add_signature(
inputs=original_features,
outputs=spec.export_outputs["serving_default"].outputs)
# TFHub doesn't support the following collections.
drop_collections = [tf.GraphKeys.LOSSES,
tf.GraphKeys.SUMMARIES, tf.GraphKeys.LOCAL_VARIABLES]
module_spec = hub.create_module_spec(
hub_module_fn, drop_collections=drop_collections)
# Loads the weights from the checkpoint using the model above
# and saves it in the export_path.
export_module_spec_with_checkpoint(
module_spec,
checkpoint_path=checkpoint_path,
export_path=export_dir,
scope_prefix="")
|
Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
"""Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
"""
hparams = trainer_lib.create_hparams(
hparams_set, data_dir=data_dir, problem_name=problem_name)
translate_model = registry.model(model_name)(
hparams, tf.estimator.ModeKeys.EVAL)
inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs")
targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets")
translate_model({
"inputs": inputs,
"targets": targets,
})
# Must be called after building the training graph, so that the dict will
# have been filled with the attention tensors, BUT before creating the
# inference graph; otherwise the dict would be filled with tensors from
# inside a tf.while_loop used for decoding, which are marked unfetchable.
att_mats = get_att_mats(translate_model)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
samples = translate_model.infer({
"inputs": inputs,
}, beam_size=beam_size)["outputs"]
return inputs, targets, samples, att_mats
|
Gets the tensors representing the attention weights from a built model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
def get_att_mats(translate_model):
"""Get's the tensors representing the attentions from a build model.
The attentions are stored in a dict on the Transformer object while building
the graph.
Args:
translate_model: Transformer object to fetch the attention weights from.
Returns:
Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
enc_atts = []
dec_atts = []
encdec_atts = []
prefix = "transformer/body/"
postfix_self_attention = "/multihead_attention/dot_product_attention"
if translate_model.hparams.self_attention_type == "dot_product_relative":
postfix_self_attention = ("/multihead_attention/"
"dot_product_attention_relative")
postfix_encdec = "/multihead_attention/dot_product_attention"
for i in range(translate_model.hparams.num_hidden_layers):
enc_att = translate_model.attention_weights[
"%sencoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
dec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/self_attention%s"
% (prefix, i, postfix_self_attention)]
encdec_att = translate_model.attention_weights[
"%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)]
enc_atts.append(enc_att)
dec_atts.append(dec_att)
encdec_atts.append(encdec_att)
return enc_atts, dec_atts, encdec_atts
|
Input str to features dict, ready for inference.
def encode(self, input_str):
"""Input str to features dict, ready for inference."""
inputs = self.encoders["inputs"].encode(input_str) + [EOS_ID]
batch_inputs = np.reshape(inputs, [1, -1, 1, 1])  # Make it 4D: [1, length, 1, 1].
return batch_inputs
|
List of ints to str.
def decode(self, integers):
"""List of ints to str."""
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode(integers)
|
List of ints to list of str.
def decode_list(self, integers):
"""List of ints to list of str."""
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode_list(integers)
|
Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
def get_vis_data_from_string(self, sess, input_string):
"""Constructs the data needed for visualizing attentions.
Args:
sess: A tf.Session object.
input_string: The input sentence to be translated and visualized.
Returns:
Tuple of (
output_string: The translated sentence.
input_list: Tokenized input sentence.
output_list: Tokenized translation.
att_mats: Tuple of attention matrices; (
enc_atts: Encoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, inp_len, inp_len)
dec_atts: Decoder self attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, out_len)
encdec_atts: Encoder-Decoder attention weights.
A list of `num_layers` numpy arrays of size
(batch_size, num_heads, out_len, inp_len)
)
"""
encoded_inputs = self.encode(input_string)
# Run inference graph to get the translation.
out = sess.run(self.samples, {
self.inputs: encoded_inputs,
})
# Run the decoded translation through the training graph to get the
# attention tensors.
att_mats = sess.run(self.att_mats, {
self.inputs: encoded_inputs,
self.targets: np.reshape(out, [1, -1, 1, 1]),
})
output_string = self.decode(out)
input_list = self.decode_list(encoded_inputs)
output_list = self.decode_list(out)
return output_string, input_list, output_list, att_mats
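The reshape of `out` back into the 4-D `(batch, length, 1, 1)` features layout before feeding it as `targets` mirrors what `encode` does for the inputs. A tiny, self-contained numpy illustration of that layout (the token ids are made up):
import numpy as np

# A sampled translation typically comes back as a flat array of token ids;
# feeding it as `targets` requires the (batch, length, 1, 1) layout.
out = np.array([17, 4, 932, 1])       # illustrative token ids
targets = np.reshape(out, [1, -1, 1, 1])
print(targets.shape)                  # (1, 4, 1, 1)
print(list(np.squeeze(targets)))      # back to [17, 4, 932, 1], as in decode()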
|
Glow Hparams.
def glow_hparams():
"""Glow Hparams."""
hparams = common_hparams.basic_params1()
hparams.clip_grad_norm = None
hparams.weight_decay = 0.0
hparams.learning_rate_constant = 3e-4
hparams.batch_size = 32
# can be prev_level, prev_step or normal.
# see: glow_ops.merge_level_and_latent_dist
hparams.add_hparam("level_scale", "prev_level")
hparams.add_hparam("n_levels", 3)
hparams.add_hparam("n_bits_x", 8)
hparams.add_hparam("depth", 32)
# Activation - Relu or Gatu
hparams.add_hparam("activation", "relu")
# Coupling layer, additive or affine.
hparams.add_hparam("coupling", "affine")
hparams.add_hparam("coupling_width", 512)
hparams.add_hparam("coupling_dropout", 0.0)
hparams.add_hparam("top_prior", "single_conv")
# init_batch_size denotes the number of examples used for data-dependent
# initialization. A higher init_batch_size is required for training
# stability especially when hparams.batch_size is low.
hparams.add_hparam("init_batch_size", 256)
hparams.add_hparam("temperature", 1.0)
return hparams
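A brief usage sketch, assuming `glow_hparams` above is importable; the overrides are arbitrary examples, not recommended settings:
# Illustrative overrides on the returned hparams object; values are
# examples only, not tuned settings.
hparams = glow_hparams()
hparams.depth = 16               # fewer flow steps per level
hparams.coupling = "additive"    # switch from affine to additive coupling
hparams.temperature = 0.8        # sample closer to the mode
print(hparams.n_levels, hparams.depth, hparams.coupling)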
|
Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
def shift_and_pad(tensor, shift, axis=0):
"""Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
"""
shape = tensor.shape
rank = len(shape)
assert 0 <= abs(axis) < rank
length = int(shape[axis])
assert 0 <= abs(shift) < length
paddings = [(0, 0)] * rank
begin = [0] * rank
size = [-1] * rank
if shift > 0:
paddings[axis] = (shift, 0)
size[axis] = length - shift
elif shift < 0:
paddings[axis] = (0, -shift)
begin[axis] = -shift
ret = tf.pad(tf.slice(tensor, begin, size), paddings)
return ret
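A quick check of the docstring example, assuming `shift_and_pad` above is importable and a TF 1.x session is available (consistent with the `tf.pad`/`tf.slice` calls in the function):
import tensorflow as tf

# Reproduces the docstring example: a positive shift pads zeros in front,
# a negative shift pads zeros at the end.
x = tf.constant([1, 2, 3, 4])
right = shift_and_pad(x, 2)    # expected [0, 0, 1, 2]
left = shift_and_pad(x, -2)    # expected [3, 4, 0, 0]
with tf.Session() as sess:
  print(sess.run([right, left]))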
|
Set of hyperparameters.
def transformer_aux_base():
"""Set of hyperparameters."""
hparams = transformer.transformer_base()
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("shift_values", "1,2,3,4")
return hparams
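The "shift_values" string is presumably parsed into integer shifts by the model that uses these hparams; that consuming code is not shown here, so the parsing below is only a hedged sketch:
# Hypothetical parsing of the comma-separated "shift_values" hparam into
# integer shifts (e.g. for use with shift_and_pad defined earlier).
hparams = transformer_aux_base()
shifts = [int(v) for v in hparams.shift_values.split(",")]
print(shifts)  # [1, 2, 3, 4]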
|
Set of hyperparameters.
def transformer_aux_tiny():
"""Set of hyperparameters."""
hparams = transformer.transformer_tiny()
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("shift_values", "1,2")
return hparams
|
Given frame_logits from a per-pixel softmax, generate colors.
def pixels_from_softmax(frame_logits, pure_sampling=False,
temperature=1.0, gumbel_noise_factor=0.2):
"""Given frame_logits from a per-pixel softmax, generate colors."""
# If we're purely sampling, just sample each pixel.
if pure_sampling or temperature == 0.0:
return common_layers.sample_with_temperature(frame_logits, temperature)
# Gumbel-sample from the pixel softmax and average by pixel values.
pixel_range = tf.to_float(tf.range(256))
for _ in range(len(frame_logits.get_shape().as_list()) - 1):
pixel_range = tf.expand_dims(pixel_range, axis=0)
frame_logits = tf.nn.log_softmax(frame_logits)
gumbel_samples = discretization.gumbel_sample(
common_layers.shape_list(frame_logits)) * gumbel_noise_factor
frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
result = tf.reduce_sum(frame * pixel_range, axis=-1)
# Round on the forward pass, not on the backward one.
return result + tf.stop_gradient(tf.round(result) - result)
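A small numpy illustration of the key step above: the softmax over 256 intensity logits is collapsed to an expected pixel value, and the straight-through trick then rounds that value on the forward pass only (the logits here are random):
import numpy as np

# Expected pixel value under a softmax over the 256 intensities.
logits = np.random.randn(256)
probs = np.exp(logits - logits.max())
probs /= probs.sum()
expected_pixel = float((probs * np.arange(256)).sum())

# Forward pass sees the rounded value; in the TF code the correction term is
# wrapped in stop_gradient, so the backward pass sees only expected_pixel.
forward_value = expected_pixel + (round(expected_pixel) - expected_pixel)
print(expected_pixel, forward_value)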
|
Common HParams for next_frame models.
def next_frame_base():
"""Common HParams for next_frame models."""
hparams = common_hparams.basic_params1()
# Loss cutoff.
hparams.add_hparam("video_modality_loss_cutoff", 0.01)
# Optional resizing of the frames before feeding them to the model.
hparams.add_hparam("preprocess_resize_frames", None)
# How many data points to shuffle. Ideally this should be part of the problem, not the model!
hparams.add_hparam("shuffle_buffer_size", 128)
# Tiny mode. For faster tests.
hparams.add_hparam("tiny_mode", False)
# In case a model supports a smaller/faster version.
hparams.add_hparam("small_mode", False)
# In case a model has a stochastic version.
hparams.add_hparam("stochastic_model", False)
# Internal loss for recurrent models.
hparams.add_hparam("internal_loss", True)
# choose from: concat, multiplicative, multi_additive
hparams.add_hparam("action_injection", "multi_additive")
# Scheduled sampling method. Choose between
# ground_truth_only, prediction_only, prob, count, prob_inverse_exp.
hparams.add_hparam("scheduled_sampling_mode", "prediction_only")
hparams.add_hparam("scheduled_sampling_decay_steps", 10000)
hparams.add_hparam("scheduled_sampling_max_prob", 1.0)
hparams.add_hparam("scheduled_sampling_k", 900.0)
return hparams
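The scheduled-sampling fields above control how often ground-truth frames are fed during training. The curve below is a generic inverse-exponential decay, shown only to illustrate the idea; it is not taken from the model code, which is not part of this snippet.
import math

def example_inverse_exp_schedule(step, k=900.0, max_prob=1.0):
  # Illustrative only: probability of using ground truth decays from
  # max_prob towards 0 as training progresses, at a rate set by k.
  return max_prob * k / (k + math.exp(step / k))

for step in [0, 2500, 5000, 10000]:
  print(step, round(example_inverse_exp_schedule(step), 3))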
|
Removes top level TimeLimit Wrapper.
Removes the TimeLimit wrapper from the top level if present, and raises an error
if any other TimeLimit wrapper is found deeper in the stack.
Args:
env: environment
Returns:
the env with removed time limit wrapper.
def remove_time_limit_wrapper(env):
"""Removes top level TimeLimit Wrapper.
Removes the TimeLimit wrapper from the top level if present, and raises an error
if any other TimeLimit wrapper is found deeper in the stack.
Args:
env: environment
Returns:
the env with removed time limit wrapper.
"""
if isinstance(env, gym.wrappers.TimeLimit):
env = env.env
env_ = env
while isinstance(env_, gym.Wrapper):
if isinstance(env_, gym.wrappers.TimeLimit):
raise ValueError("Can remove only top-level TimeLimit gym.Wrapper.")
env_ = env_.env
return env
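A hedged usage sketch with a classic Gym environment; "CartPole-v1" is only an example id, and the exact wrapper stack returned by `gym.make` depends on the installed gym version (older versions put TimeLimit at the top level, which is what this helper expects):
import gym

# In older gym versions, gym.make returns a TimeLimit wrapper at the top
# level; stripping it exposes the underlying environment.
env = gym.make("CartPole-v1")
print(type(env).__name__)            # often "TimeLimit"
env = remove_time_limit_wrapper(env)
print(type(env).__name__)            # the inner environment (or a non-TimeLimit wrapper)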
|
Wraps a gym environment. See make_gym_env for details.
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
rendered_env_resize_to, sticky_actions):
"""Wraps a gym environment. see make_gym_env for details."""
# rl_env_max_episode_steps is None or int.
assert ((not rl_env_max_episode_steps) or
isinstance(rl_env_max_episode_steps, int))
wrap_with_time_limit = ((not rl_env_max_episode_steps) or
rl_env_max_episode_steps >= 0)
if wrap_with_time_limit:
env = remove_time_limit_wrapper(env)
if sticky_actions:
env = StickyActionEnv(env)
if maxskip_env:
env = MaxAndSkipEnv(env) # pylint: disable=redefined-variable-type
if rendered_env:
env = RenderedEnv(env, resize_to=rendered_env_resize_to)
if wrap_with_time_limit:
env = gym.wrappers.TimeLimit(
env, max_episode_steps=rl_env_max_episode_steps)
return env
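An illustrative call to the wrapper chain above; the environment id is an example, and `StickyActionEnv`, `MaxAndSkipEnv`, and `RenderedEnv` are assumed to come from the surrounding module (they are not defined in this snippet):
import gym

# Example wiring only; the flags mirror the function's parameters.
env = gym.make("PongNoFrameskip-v4")
env = gym_env_wrapper(
    env,
    rl_env_max_episode_steps=10000,  # re-wrapped in TimeLimit at the end
    maxskip_env=True,                # MaxAndSkipEnv from the surrounding module
    rendered_env=False,
    rendered_env_resize_to=None,
    sticky_actions=False)
print(env)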
|