Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-is, otherwise we impose the requested time limit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
def make_gym_env(name,
rl_env_max_episode_steps=-1,
maxskip_env=False,
rendered_env=False,
rendered_env_resize_to=None,
sticky_actions=False):
"""Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-is, otherwise we impose the requested time limit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
"""
env = gym.make(name)
return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env,
rendered_env, rendered_env_resize_to, sticky_actions)
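For illustration, a hedged usage sketch (the env name and step limit are examples only; assumes gym and the wrappers used by gym_env_wrapper are importable):
# Hypothetical usage, not part of the library code.
env = make_gym_env("PongNoFrameskip-v4",          # example Atari env id
                   rl_env_max_episode_steps=10000,
                   maxskip_env=True)
ob = env.reset()
ob, reward, done, info = env.step(env.action_space.sample())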
|
Registers the class in Gym and returns the registered name and the env.
def register_gym_env(class_entry_point, version="v0", kwargs=None):
"""Registers the class in Gym and returns the registered name and the env."""
split_on_colon = class_entry_point.split(":")
assert len(split_on_colon) == 2
class_name = split_on_colon[1]
# We have to add the version to conform to gym's API.
env_name = "T2TEnv-{}-{}".format(class_name, version)
gym.envs.register(id=env_name, entry_point=class_entry_point, kwargs=kwargs)
tf.logging.info("Entry Point [%s] registered with id [%s]", class_entry_point,
env_name)
return env_name, gym.make(env_name)
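A hedged usage sketch; the module path and class name are purely illustrative (the entry point must have the "package.module:ClassName" form, since the function splits on ":"):
# Hypothetical example.
env_name, env = register_gym_env("my_package.my_envs:MyTinyEnv")
# env_name == "T2TEnv-MyTinyEnv-v0"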
|
Repeat action, sum reward, and max over last observations.
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame doesn't matter.
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
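A minimal standalone sketch of the max-over-last-two-frames logic above, using numpy and a made-up skip of 4:
import numpy as np
skip = 4
frames = [np.full((2, 2), v, dtype=np.uint8) for v in (10, 20, 30, 25)]
obs_buffer = np.zeros((2, 2, 2), dtype=np.uint8)
for i, obs in enumerate(frames):   # stands in for env.step(action) observations
  if i == skip - 2:
    obs_buffer[0] = obs            # second-to-last frame
  if i == skip - 1:
    obs_buffer[1] = obs            # last frame
max_frame = obs_buffer.max(axis=0)
# max_frame is all 30s: the brighter of the last two frames wins per pixel.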
|
Log out and possibly reraise errors during import.
def _handle_errors(errors):
"""Log out and possibly reraise errors during import."""
if not errors:
return
log_all = True # pylint: disable=unused-variable
err_msg = "T2T: skipped importing {num_missing} data_generators modules."
print(err_msg.format(num_missing=len(errors)))
for module, err in errors:
err_str = str(err)
if not _is_import_err_msg(err_str, module):
print("From module %s" % module)
raise err
if log_all:
print("Did not import module: %s; Cause: %s" % (module, err_str))
|
Create HParams with data_dir and problem hparams, if kwargs provided.
def create_hparams(hparams_set,
hparams_overrides_str="",
data_dir=None,
problem_name=None,
hparams_path=None):
"""Create HParams with data_dir and problem hparams, if kwargs provided."""
hparams = registry.hparams(hparams_set)
if hparams_path and tf.gfile.Exists(hparams_path):
hparams = create_hparams_from_json(hparams_path, hparams)
if data_dir:
hparams.add_hparam("data_dir", data_dir)
if hparams_overrides_str:
tf.logging.info("Overriding hparams in %s with %s", hparams_set,
hparams_overrides_str)
hparams = hparams.parse(hparams_overrides_str)
if problem_name:
add_problem_hparams(hparams, problem_name)
return hparams
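A hedged usage sketch; the hparams set, overrides and problem name are examples only (assumes they are registered in the T2T registry):
# Hypothetical usage: start from a registered set, then override a few values.
hparams = create_hparams(
    "transformer_base",                                   # example hparams set
    hparams_overrides_str="hidden_size=256,num_hidden_layers=4",
    data_dir="/tmp/t2t_data",
    problem_name="translate_ende_wmt32k")                 # example problem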
|
Load hparams from a JSON file; optionally start from existing hparams if specified.
def create_hparams_from_json(json_path, hparams=None):
"""Loading hparams from json; can also start from hparams if specified."""
tf.logging.info("Loading hparams from existing json %s" % json_path)
with tf.gfile.Open(json_path, "r") as f:
hparams_values = json.load(f)
# Prevent certain keys from overwriting the passed-in hparams.
# TODO(trandustin): Remove this hack after registries are available to avoid
# saving them as functions.
hparams_values.pop("bottom", None)
hparams_values.pop("loss", None)
hparams_values.pop("name", None)
hparams_values.pop("top", None)
hparams_values.pop("weights_fn", None)
new_hparams = hparam.HParams(**hparams_values)
# Some keys are in new_hparams but not hparams, so we need to be more
# careful than simply using parse_json() from HParams
if hparams: # hparams specified, so update values from json
for key in sorted(new_hparams.values().keys()):
if hasattr(hparams, key): # Overlapped keys
value = getattr(hparams, key)
new_value = getattr(new_hparams, key)
if value != new_value: # Different values
tf.logging.info("Overwrite key %s: %s -> %s" % (
key, value, new_value))
setattr(hparams, key, new_value)
else:
hparams = new_hparams
return hparams
|
Add problem hparams for the problems.
def add_problem_hparams(hparams, problem_name_or_instance):
"""Add problem hparams for the problems."""
if isinstance(problem_name_or_instance, problem_lib.Problem):
problem = problem_name_or_instance
else:
problem = registry.problem(problem_name_or_instance)
p_hparams = problem.get_hparams(hparams)
hparams.problem = problem
hparams.problem_hparams = p_hparams
|
Loads examples from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset plus the train, test, and development splits.
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
"""Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset plus the train, test, and development splits.
"""
infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tf.logging.info('Loading examples')
all_examples = []
for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
if i % 100000 == 0:
tf.logging.info('%d examples have been loaded....' % i)
ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
all_examples.append(ex)
random.seed(1)
random.shuffle(all_examples)
n_train = int(len(all_examples) * prop_train)
n_val = n_train + int(len(all_examples) * prop_val)
train = all_examples[:n_train]
val = all_examples[n_train:n_val]
test = []
for e in all_examples[n_val:]:
if e['n_intervening'] == e['n_diff_intervening']:
test.append(e)
return all_examples, train, val, test
|
Download and extract CIFAR to directory unless it is there.
def _get_cifar(directory, url):
"""Download and extract CIFAR to directory unless it is there."""
filename = os.path.basename(url)
path = generator_utils.maybe_download(directory, filename, url)
tarfile.open(path, "r:gz").extractall(directory)
|
Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):
"""Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
"""
if cifar_version == "cifar10":
url = _CIFAR10_URL
train_files = _CIFAR10_TRAIN_FILES
test_files = _CIFAR10_TEST_FILES
prefix = _CIFAR10_PREFIX
image_size = _CIFAR10_IMAGE_SIZE
label_key = "labels"
elif cifar_version == "cifar100" or cifar_version == "cifar20":
url = _CIFAR100_URL
train_files = _CIFAR100_TRAIN_FILES
test_files = _CIFAR100_TEST_FILES
prefix = _CIFAR100_PREFIX
image_size = _CIFAR100_IMAGE_SIZE
if cifar_version == "cifar100":
label_key = "fine_labels"
else:
label_key = "coarse_labels"
_get_cifar(tmp_dir, url)
data_files = train_files if training else test_files
all_images, all_labels = [], []
for filename in data_files:
path = os.path.join(tmp_dir, prefix, filename)
with tf.gfile.Open(path, "rb") as f:
if six.PY2:
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding="latin1")
images = data["data"]
num_images = images.shape[0]
images = images.reshape((num_images, 3, image_size, image_size))
all_images.extend([
np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)
])
labels = data[label_key]
all_labels.extend([labels[j] for j in range(num_images)])
return image_utils.image_generator(
all_images[start_from:start_from + how_many],
all_labels[start_from:start_from + how_many])
|
HParams for PPO base.
def rlmb_ppo_base():
"""HParams for PPO base."""
hparams = _rlmb_base()
ppo_params = dict(
base_algo="ppo",
base_algo_params="ppo_original_params",
# Number of real environments to train on simultaneously.
real_batch_size=1,
# Number of simulated environments to train on simultaneously.
simulated_batch_size=16,
eval_batch_size=32,
# Unused; number of PPO epochs is calculated from the real frame limit.
real_ppo_epochs_num=0,
# Number of frames that can be taken from the simulated environment before
# it diverges, used for training the agent.
ppo_epochs_num=1000, # This should be enough to see something
# Should be equal to simulated_rollout_length.
# TODO(koz4k): Uncouple this by outputting done from SimulatedBatchEnv.
ppo_epoch_length=hparams.simulated_rollout_length,
# Do not eval since simulated batch env does not produce dones
ppo_eval_every_epochs=0,
ppo_learning_rate_constant=1e-4, # Will be changed, just so it exists.
# This needs to be divisible by real_ppo_effective_num_agents.
real_ppo_epoch_length=16 * 200,
real_ppo_learning_rate_constant=1e-4,
real_ppo_effective_num_agents=16,
real_ppo_eval_every_epochs=0,
simulation_flip_first_random_for_beginning=True,
)
update_hparams(hparams, ppo_params)
return hparams
|
rlmb_dqn_base params.
def rlmb_dqn_base():
"""rlmb_dqn_base params."""
hparams = _rlmb_base()
simulated_rollout_length = 10
dqn_params = dict(
base_algo="dqn",
base_algo_params="dqn_original_params",
real_batch_size=1,
simulated_batch_size=16,
dqn_agent_generates_trainable_dones=False,
eval_batch_size=1,
# Must be equal to dqn_time_limit for now
simulated_rollout_length=simulated_rollout_length,
dqn_time_limit=simulated_rollout_length,
simulation_flip_first_random_for_beginning=False,
dqn_eval_episodes_num=3,
# TODO(kc): only for model-free compatibility, remove this
epochs_num=-1,
)
update_hparams(hparams, dqn_params)
return hparams
|
Base setting but quicker with only 2 epochs.
def rlmb_ppo_quick():
"""Base setting but quicker with only 2 epochs."""
hparams = rlmb_ppo_base()
hparams.epochs = 2
hparams.model_train_steps = 25000
hparams.ppo_epochs_num = 700
hparams.ppo_epoch_length = 50
return hparams
|
Base setting with a stochastic next-frame model.
def rlmb_base_stochastic():
"""Base setting with a stochastic next-frame model."""
hparams = rlmb_base()
hparams.initial_epoch_train_steps_multiplier = 5
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams
|
Base setting with stochastic discrete model.
def rlmb_base_stochastic_discrete():
"""Base setting with stochastic discrete model."""
hparams = rlmb_base()
hparams.learning_rate_bump = 1.0
hparams.grayscale = False
hparams.generative_model = "next_frame_basic_stochastic_discrete"
hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
# The parameters below are the same as base, but repeated for easier reading.
hparams.ppo_epoch_length = 50
hparams.simulated_rollout_length = 50
hparams.simulated_batch_size = 16
return hparams
|
Long setting with stochastic discrete model & deterministic sim starts.
def rlmb_long_stochastic_discrete_simulation_deterministic_starts():
"""Long setting with stochastic discrete model & deterministic sim starts."""
hparams = rlmb_base_stochastic_discrete()
hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long"
hparams.ppo_epochs_num = 1000
hparams.simulation_random_starts = False
return hparams
|
Long setting with stochastic discrete model, changed ppo steps.
def rlmb_long_stochastic_discrete_100steps():
"""Long setting with stochastic discrete model, changed ppo steps."""
hparams = rlmb_long_stochastic_discrete()
hparams.ppo_epoch_length = 100
hparams.simulated_rollout_length = 100
hparams.simulated_batch_size = 8
return hparams
|
Long setting with stochastic discrete model, changed ppo steps.
def rlmb_long_stochastic_discrete_25steps():
"""Long setting with stochastic discrete model, changed ppo steps."""
hparams = rlmb_long_stochastic_discrete()
hparams.ppo_epoch_length = 25
hparams.simulated_rollout_length = 25
hparams.simulated_batch_size = 32
return hparams
|
Base setting with stochastic discrete model.
def rlmb_base_stochastic_discrete_noresize():
"""Base setting with stochastic discrete model."""
hparams = rlmb_base()
hparams.generative_model = "next_frame_basic_stochastic_discrete"
hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
hparams.resize_height_factor = 1
hparams.resize_width_factor = 1
return hparams
|
Base setting with sv2p as world model.
def rlmb_base_sv2p():
"""Base setting with sv2p as world model."""
hparams = rlmb_base()
hparams.learning_rate_bump = 1.0
hparams.generative_model = "next_frame_sv2p"
hparams.generative_model_params = "next_frame_sv2p_atari"
return hparams
|
Parameters to override for tiny setting excluding agent-related hparams.
def _rlmb_tiny_overrides():
"""Parameters to override for tiny setting excluding agent-related hparams."""
return dict(
epochs=1,
num_real_env_frames=128,
model_train_steps=2,
max_num_noops=1,
eval_max_num_noops=1,
generative_model_params="next_frame_tiny",
stop_loop_early=True,
resize_height_factor=2,
resize_width_factor=2,
wm_eval_rollout_ratios=[1],
rl_env_max_episode_steps=7,
eval_rl_env_max_episode_steps=7,
simulated_rollout_length=2,
eval_sampling_temps=[0.0, 1.0],
)
|
Tiny set for testing.
def rlmb_ppo_tiny():
"""Tiny set for testing."""
hparams = rlmb_ppo_base()
hparams = hparams.override_from_dict(_rlmb_tiny_overrides())
update_hparams(hparams, dict(
ppo_epochs_num=2,
ppo_epoch_length=10,
real_ppo_epoch_length=36,
real_ppo_effective_num_agents=2,
real_batch_size=1,
eval_batch_size=1,
))
return hparams
|
Tiny set for testing.
def rlmb_dqn_tiny():
"""Tiny set for testing."""
hparams = rlmb_dqn_base()
hparams = hparams.override_from_dict(_rlmb_tiny_overrides())
update_hparams(hparams, dict(
simulated_rollout_length=2,
dqn_time_limit=2,
dqn_num_frames=128,
real_dqn_replay_buffer_replay_capacity=100,
dqn_replay_buffer_replay_capacity=100,
real_dqn_agent_min_replay_history=10,
dqn_agent_min_replay_history=10,
))
return hparams
|
Tiny setting with a stochastic next-frame model.
def rlmb_tiny_stochastic():
"""Tiny setting with a stochastic next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams
|
Tiny setting with a recurrent next-frame model.
def rlmb_tiny_recurrent():
"""Tiny setting with a recurrent next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_recurrent"
hparams.generative_model_params = "next_frame_basic_recurrent"
return hparams
|
Tiny setting with a tiny sv2p model.
def rlmb_tiny_sv2p():
"""Tiny setting with a tiny sv2p model."""
hparams = rlmb_ppo_tiny()
hparams.generative_model = "next_frame_sv2p"
hparams.generative_model_params = "next_frame_sv2p_tiny"
hparams.grayscale = False
return hparams
|
Grid over games and frames, and 5 runs each for variance.
def rlmb_grid(rhp):
"""Grid over games and frames, and 5 runs each for variance."""
rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
base = 100000
medium = base // 2
small = medium // 2
rhp.set_discrete("loop.num_real_env_frames", [base, medium, small])
# Dummy parameter to get 5 runs for each configuration
rhp.set_discrete("model.moe_loss_coef", list(range(5)))
|
Merge multiple HParams into one with scopes.
def merge_unscoped_hparams(scopes_and_hparams):
"""Merge multiple HParams into one with scopes."""
merged_values = {}
for (scope, hparams) in scopes_and_hparams:
for key, value in six.iteritems(hparams.values()):
scoped_key = "%s.%s" % (scope, key)
merged_values[scoped_key] = value
return hparam.HParams(**merged_values)
|
Split single HParams with scoped keys into multiple.
def split_scoped_hparams(scopes, merged_hparams):
"""Split single HParams with scoped keys into multiple."""
split_values = {scope: {} for scope in scopes}
merged_values = merged_hparams.values()
for scoped_key, value in six.iteritems(merged_values):
scope = scoped_key.split(".")[0]
key = scoped_key[len(scope) + 1:]
split_values[scope][key] = value
return [
hparam.HParams(**split_values[scope]) for scope in scopes
]
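A hedged round-trip sketch of the two helpers above, assuming hparam.HParams behaves like tf.contrib.training.HParams (merged keys take the form "scope.key"); the scopes and values are made up:
# Hypothetical example, not part of the library code.
loop_hp = hparam.HParams(epochs=2, game="pong")
ppo_hp = hparam.HParams(learning_rate=1e-4)
merged = merge_unscoped_hparams([("loop", loop_hp), ("ppo", ppo_hp)])
# merged now holds "loop.epochs", "loop.game" and "ppo.learning_rate".
loop_hp2, ppo_hp2 = split_scoped_hparams(["loop", "ppo"], merged)
# loop_hp2.epochs == 2 and ppo_hp2.learning_rate == 1e-4 again.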
|
Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying model and ppo HParams.
Returns:
HParams suitable for passing to training_loop.
def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
"""Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying model and ppo HParams.
Returns:
HParams suitable for passing to training_loop.
"""
trial_hp_overrides = scoped_overrides.values()
# Create loop, model, and ppo base HParams
loop_hp = create_loop_hparams()
model_hp_name = trial_hp_overrides.get(
"loop.generative_model_params", loop_hp.generative_model_params)
model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)
base_algo_params_name = trial_hp_overrides.get(
"loop.base_algo_params", loop_hp.base_algo_params)
algo_hp = registry.hparams(base_algo_params_name)
# Merge them and then override with the scoped overrides
combined_hp = merge_unscoped_hparams(
zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))
combined_hp.override_from_dict(trial_hp_overrides)
# Split out the component hparams
loop_hp, model_hp, algo_hp = (
split_scoped_hparams(HP_SCOPES, combined_hp))
# Dynamically register the model hp and set the new name in loop_hp
model_hp_name = "model_hp_%s" % str(trial_id)
dynamic_register_hparams(model_hp_name, model_hp)
loop_hp.generative_model_params = model_hp_name
# Dynamically register the algo hp and set the new name in loop_hp
algo_hp_name = "algo_hp_%s" % str(trial_id)
dynamic_register_hparams(algo_hp_name, algo_hp)
loop_hp.base_algo_params = algo_hp_name
return loop_hp
|
Get mapping from keyboard keys to actions.
Required by gym.utils.play in environment or top level wrapper.
Returns:
{
Unicode code point for keyboard key: action (formatted for step()),
...
}
def get_keys_to_action(self):
"""Get mapping from keyboard keys to actions.
Required by gym.utils.play in environment or top level wrapper.
Returns:
{
Unicode code point for keyboard key: action (formatted for step()),
...
}
"""
# Based on gym AtariEnv.get_keys_to_action()
keyword_to_key = {
"UP": ord("w"),
"DOWN": ord("s"),
"LEFT": ord("a"),
"RIGHT": ord("d"),
"FIRE": ord(" "),
}
keys_to_action = {}
for action_id, action_meaning in enumerate(self.action_meanings):
keys_tuple = tuple(sorted([
key for keyword, key in keyword_to_key.items()
if keyword in action_meaning]))
assert keys_tuple not in keys_to_action
keys_to_action[keys_tuple] = action_id
# Special actions:
keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION
keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION
keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION
return keys_to_action
|
Pass action to underlying environment(s) or perform special action.
def step(self, action):
"""Pass action to underlying environment(s) or perform special action."""
# Special codes
if action in self._player_actions():
envs_step_tuples = self._player_actions()[action]()
elif self._wait and action == self.name_to_action_num["NOOP"]:
# Ignore no-op, do not pass to environment.
envs_step_tuples = self._last_step_tuples
else:
# Run action on environment(s).
if action == self.WAIT_MODE_NOOP_ACTION:
action = self.name_to_action_num["NOOP"]
# Perform action on underlying environment(s).
envs_step_tuples = self._step_envs(action)
self._update_statistics(envs_step_tuples)
self._last_step_tuples = envs_step_tuples
ob, reward, done, info = self._player_step_tuple(envs_step_tuples)
return ob, reward, done, info
|
Expand observation array with additional information header (top rows).
Args:
ob: observation
reward: reward to be included in header.
cumulative_reward: total cumulated reward to be included in header.
Returns:
Expanded observation array.
def _augment_observation(self, ob, reward, cumulative_reward):
""""Expand observation array with additional information header (top rows).
Args:
ob: observation
reward: reward to be included in header.
cumulative_reward: total cumulated reward to be included in header.
Returns:
Expanded observation array.
"""
img = PIL_Image().new("RGB",
(ob.shape[1], self.HEADER_HEIGHT,))
draw = PIL_ImageDraw().Draw(img)
draw.text(
(1, 0), "c:{:3}, r:{:3}".format(int(cumulative_reward), int(reward)),
fill=(255, 0, 0)
)
draw.text(
(1, 15), "fc:{:3}".format(int(self._frame_counter)),
fill=(255, 0, 0)
)
header = np.asarray(img)
del img
header.setflags(write=1)
# Top row color indicates if WAIT MODE is on.
if self._wait:
pixel_fill = (0, 255, 0)
else:
pixel_fill = (255, 0, 0)
header[0, :, :] = pixel_fill
return np.concatenate([header, ob], axis=0)
|
Construct observation, return usual step tuple.
Args:
envs_step_tuples: dict of (ob, reward, done, info) tuples keyed by env name.
Returns:
Step tuple: ob, reward, done, info
ob: concatenated images [simulated observation, real observation,
difference], with additional information in the header.
reward: real environment reward
done: True iff. envs_step_tuples['real_env'][2] is True
info: real environment info
def _player_step_tuple(self, envs_step_tuples):
"""Construct observation, return usual step tuple.
Args:
envs_step_tuples: dict of (ob, reward, done, info) tuples keyed by env name.
Returns:
Step tuple: ob, reward, done, info
ob: concatenated images [simulated observation, real observation,
difference], with additional information in the header.
reward: real environment reward
done: True iff. envs_step_tuples['real_env'][2] is True
info: real environment info
"""
ob_real, reward_real, _, _ = envs_step_tuples["real_env"]
ob_sim, reward_sim, _, _ = envs_step_tuples["sim_env"]
ob_err = absolute_hinge_difference(ob_sim, ob_real)
ob_real_aug = self._augment_observation(ob_real, reward_real,
self.cumulative_real_reward)
ob_sim_aug = self._augment_observation(ob_sim, reward_sim,
self.cumulative_sim_reward)
ob_err_aug = self._augment_observation(
ob_err, reward_sim - reward_real,
self.cumulative_sim_reward - self.cumulative_real_reward
)
ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1)
_, reward, done, info = envs_step_tuples["real_env"]
return ob, reward, done, info
|
Reset simulated and real environments.
def reset(self):
"""Reset simulated and real environments."""
self._frame_counter = 0
ob_real = self.real_env.reset()
# Initialize simulated environment with frames from real one.
self.sim_env.add_to_initial_stack(ob_real)
for _ in range(3):
ob_real, _, _, _ = self.real_env.step(self.name_to_action_num["NOOP"])
self.sim_env.add_to_initial_stack(ob_real)
ob_sim = self.sim_env.reset()
assert np.all(ob_real == ob_sim)
self._last_step_tuples = self._pack_step_tuples((ob_real, 0, False, {}),
(ob_sim, 0, False, {}))
self.set_zero_cumulative_rewards()
ob, _, _, _ = self._player_step_tuple(self._last_step_tuples)
return ob
|
Perform step(action) on environments and update initial_frame_stack.
def _step_envs(self, action):
"""Perform step(action) on environments and update initial_frame_stack."""
self._frame_counter += 1
real_env_step_tuple = self.real_env.step(action)
sim_env_step_tuple = self.sim_env.step(action)
self.sim_env.add_to_initial_stack(real_env_step_tuple[0])
return self._pack_step_tuples(real_env_step_tuple, sim_env_step_tuple)
|
Augment observation, return usual step tuple.
def _player_step_tuple(self, envs_step_tuples):
"""Augment observation, return usual step tuple."""
ob, reward, done, info = envs_step_tuples["env"]
ob = self._augment_observation(ob, reward, self.cumulative_reward)
return ob, reward, done, info
|
Compute first- and second-order time-derivative channels.
Args:
filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
name: scope name
Returns:
float32 tensor with shape [batch_size, len, num_bins, 3]
def add_delta_deltas(filterbanks, name=None):
"""Compute time first and second-order derivative channels.
Args:
filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
name: scope name
Returns:
float32 tensor with shape [batch_size, len, num_bins, 3]
"""
delta_filter = np.array([2, 1, 0, -1, -2])
delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, "full")
delta_filter_stack = np.array(
[[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,
list(delta_delta_filter)],
dtype=np.float32).T[:, None, None, :]
delta_filter_stack /= np.sqrt(
np.sum(delta_filter_stack**2, axis=0, keepdims=True))
filterbanks = tf.nn.conv2d(
filterbanks, delta_filter_stack, [1, 1, 1, 1], "SAME", data_format="NHWC",
name=name)
return filterbanks
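A standalone numpy sketch of how the fixed filter stack above is built (identity, delta and delta-delta rows, each L2-normalized) and the kernel shape tf.nn.conv2d expects:
import numpy as np
import scipy.signal
delta = np.array([2, 1, 0, -1, -2])
delta_delta = scipy.signal.convolve(delta, delta, "full")
# delta_delta == [4, 4, 1, -4, -10, -4, 1, 4, 4]
stack = np.array(
    [[0] * 4 + [1] + [0] * 4,          # identity: passes the frame through
     [0] * 2 + list(delta) + [0] * 2,  # first time derivative
     list(delta_delta)],               # second time derivative
    dtype=np.float32).T[:, None, None, :]
stack /= np.sqrt(np.sum(stack ** 2, axis=0, keepdims=True))
print(stack.shape)  # (9, 1, 1, 3): 9x1 kernel, 1 input channel, 3 outputs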
|
Implement mel-filterbank extraction using tf ops.
Args:
waveforms: float32 tensor with shape [batch_size, max_len]
sample_rate: sampling rate of the waveform
dither: stddev of Gaussian noise added to waveform to prevent quantization
artefacts
preemphasis: waveform high-pass filtering constant
frame_length: frame length in ms
frame_step: frame step in ms
fft_length: number of fft bins
window_fn: windowing function
lower_edge_hertz: lowest frequency of the filterbank
upper_edge_hertz: highest frequency of the filterbank
num_mel_bins: filterbank size
log_noise_floor: clip small values to prevent numeric overflow in log
apply_mask: When working on a batch of samples, set padding frames to zero
Returns:
filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
def compute_mel_filterbank_features(
waveforms,
sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97,
frame_length=25, frame_step=10, fft_length=None,
window_fn=functools.partial(tf.contrib.signal.hann_window, periodic=True),
lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80,
log_noise_floor=1e-3, apply_mask=True):
"""Implement mel-filterbank extraction using tf ops.
Args:
waveforms: float32 tensor with shape [batch_size, max_len]
sample_rate: sampling rate of the waveform
dither: stddev of Gaussian noise added to waveform to prevent quantization
artefacts
preemphasis: waveform high-pass filtering constant
frame_length: frame length in ms
frame_step: frame step in ms
fft_length: number of fft bins
window_fn: windowing function
lower_edge_hertz: lowest frequency of the filterbank
upper_edge_hertz: highest frequency of the filterbank
num_mel_bins: filterbank size
log_noise_floor: clip small values to prevent numeric overflow in log
apply_mask: When working on a batch of samples, set padding frames to zero
Returns:
filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
"""
# `stfts` is a complex64 Tensor representing the short-time Fourier
# Transform of each signal in `signals`. Its shape is
# [batch_size, ?, fft_unique_bins]
# where fft_unique_bins = fft_length // 2 + 1
# Find the wave length: the largest index for which the value is !=0
# note that waveform samples that are exactly 0.0 are quite common, so
# simply doing sum(waveforms != 0, axis=-1) will not work correctly.
wav_lens = tf.reduce_max(
tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) *
tf.to_int32(tf.not_equal(waveforms, 0.0)),
axis=-1) + 1
if dither > 0:
waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)
if preemphasis > 0:
waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1]
wav_lens -= 1
frame_length = int(frame_length * sample_rate / 1e3)
frame_step = int(frame_step * sample_rate / 1e3)
if fft_length is None:
fft_length = int(2**(np.ceil(np.log2(frame_length))))
stfts = tf.contrib.signal.stft(
waveforms,
frame_length=frame_length,
frame_step=frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=True)
stft_lens = (wav_lens + (frame_step - 1)) // frame_step
masks = tf.to_float(tf.less_equal(
tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0),
tf.expand_dims(stft_lens, 1)))
# An energy spectrogram is the magnitude of the complex-valued STFT.
# A float32 Tensor of shape [batch_size, ?, 257].
magnitude_spectrograms = tf.abs(stfts)
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
linear_to_mel_weight_matrix = (
tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz))
mel_spectrograms = tf.tensordot(
magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for tensordot does not currently handle this case.
mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))
if apply_mask:
log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1)
return tf.expand_dims(log_mel_sgram, -1, name="mel_sgrams")
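A hedged usage sketch (TensorFlow 1.x only, since the function relies on tf.contrib.signal; the dummy batch below is made up):
# Hypothetical usage: a batch of two 1-second, 16 kHz dummy waveforms.
waveforms = tf.zeros([2, 16000], dtype=tf.float32)
mel = compute_mel_filterbank_features(waveforms, sample_rate=16000)
# mel has shape [2, num_frames, 80, 1]; feeding it to add_delta_deltas
# above yields the 3-channel [batch, len, 80, 3] features.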
|
Plays the env problem by randomly sampling actions for `num_steps`.
def play_env_problem_randomly(env_problem,
num_steps):
"""Plays the env problem by randomly sampling actions for `num_steps`."""
# Reset all environments.
env_problem.reset()
# Play all environments, sampling random actions each time.
for _ in range(num_steps):
# Sample batch_size actions from the action space and stack them.
actions = np.stack([env_problem.action_space.sample() for _ in range(
env_problem.batch_size)])
# Execute actions, observations are stored in `env_problem`.
_, _, dones, _ = env_problem.step(actions)
# Get the indices where we are done and reset those.
env_problem.reset(indices=done_indices(dones))
|
Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random vocabulary indices for training.
shape = [train_samples, length]
def generate_plaintext_random(plain_vocab, distribution, train_samples,
length):
"""Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random vocabulary indices for training.
shape = [train_samples, length]
"""
if distribution is not None:
assert len(distribution) == len(plain_vocab)
train_indices = np.random.choice(
range(len(plain_vocab)), (train_samples, length), p=distribution)
return train_indices
|
Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of positions to shift; shifts to the right if positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
def encipher_shift(plaintext, plain_vocab, shift):
"""Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of positions to shift; shifts to the right if positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
cipher = ShiftEncryptionLayer(plain_vocab, shift)
for _, sentence in enumerate(plaintext):
cipher_sentence = []
for _, character in enumerate(sentence):
encrypted_char = cipher.encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
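ShiftEncryptionLayer is not included in this excerpt; a minimal standalone sketch of the same shift (Caesar) idea over a toy vocabulary:
# Standalone illustration only; vocabulary and shift are made up.
vocab = ["a", "b", "c", "d"]
index = {ch: i for i, ch in enumerate(vocab)}
shift = 1
def shift_char(ch):
  return vocab[(index[ch] + shift) % len(vocab)]
print([[shift_char(ch) for ch in sentence] for sentence in [["a", "b", "d"]]])
# [['b', 'c', 'a']] -- each character moves one step right, wrapping around.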
|
Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text.
def encipher_vigenere(plaintext, plain_vocab, key):
"""Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
# generate Vigenere table
layers = [
ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))
]
for i, sentence in enumerate(plaintext):
cipher_sentence = []
for j, character in enumerate(sentence):
key_idx = key[j % len(key)]
encrypted_char = layers[key_idx].encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext
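The Vigenere variant simply cycles the key over character positions (key[j % len(key)] above); a standalone sketch with a toy vocabulary and key:
# Standalone illustration only; vocabulary, key and sentence are made up.
vocab = ["a", "b", "c", "d"]
index = {ch: i for i, ch in enumerate(vocab)}
key = [1, 3]                     # per-position shift amounts, cycled
sentence = ["a", "a", "a", "a"]
cipher = [vocab[(index[ch] + key[j % len(key)]) % len(vocab)]
          for j, ch in enumerate(sentence)]
print(cipher)  # ['b', 'd', 'b', 'd']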
|
A stack of super_lm layers.
Args:
inputs: a list of Tensors
attention_bias: list of bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
mp: a Parallelism object
padding: a string
Returns:
y: a list of Tensors
extra_loss: an optional scalar
def _super_stack(inputs,
attention_bias,
hparams,
mp,
padding="LEFT"):
"""A stack of super_lm layers.
Args:
inputs: a list of Tensors
attention_bias: list of bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
mp: a Parallelism object
padding: a string
Returns:
y: a list of Tensors
extra_loss: an optional scalar
"""
layers = hparams.layers.strip(",").split(",")
moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
if hparams.diet_experts:
hsize, = moe_hidden_sizes
def _diet_expert(x):
return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params())
expert_fn = _diet_expert
else:
expert_fn = expert_utils.ffn_expert_fn(
hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
# scaled_dot_product_attention_with_projections uses a 3d attention bias
# (no heads), where multihead_attention uses 4d attention bias.
attention_bias_3d = mp(tf.squeeze, attention_bias, 1)
mix_size = int(hparams.mix_fraction * hparams.hidden_size)
accumulator = inputs
x = inputs
extra_losses = []
for layer_num, layer_type in enumerate(layers):
with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
tf.logging.info("%s_%d" % (layer_type, layer_num))
if layer_type == "a":
# accumulate
accumulator = mp(tf.add, x, accumulator)
x = accumulator
elif layer_type == "n":
# normalize
x = mp(common_layers.apply_norm,
x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)
elif layer_type == "d":
# dropout
x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
elif layer_type == "m":
# mix across shards
def _split(t):
return tuple(tf.split(
t, [mix_size, hparams.hidden_size - mix_size], 2))
to_mix, to_keep = mp(_split, x)
mixed = expert_utils.all_reduce_ring(to_mix, mp)
mixed = mp(tf.multiply, mixed, mp.n ** -0.5)
x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep)
elif layer_type == "att":
# single-head attention
q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
name="q_transform")
x = mp(
common_attention.scaled_dot_product_attention_simple,
q, x, x, attention_bias_3d)
x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
name="o_transform")
elif layer_type == "multihead-att":
# multi-head attention
x = mp(
common_attention.multihead_attention,
x,
None,
attention_bias, # bias
hparams.multihead_attention_key_channels or hparams.hidden_size,
hparams.multihead_attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.multihead_attention_num_heads,
hparams.attention_dropout)
elif layer_type == "ffn":
x = mp(
common_layers.dense_relu_dense, x,
hparams.filter_size, hparams.hidden_size)
elif layer_type == "conv":
# convolution
x = mp(
common_layers.conv1d,
x,
hparams.hidden_size,
hparams.kernel_height,
activation=tf.nn.relu,
padding=padding,
)
elif layer_type == "moe":
# mixture of experts - each model shard has its own local MoE.
x, loss = mp(
expert_utils.local_moe,
x,
train=hparams.mode == tf.estimator.ModeKeys.TRAIN,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
extra_losses.extend(loss)
else:
assert False, "unknown sublayer %s" % layer_type
if extra_losses:
extra_loss = tf.add_n(extra_losses)
else:
extra_loss = None
return x, extra_loss
|
Set of hyperparameters.
def super_lm_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.moe_hidden_sizes = "512"
hparams.batch_size = 16384
hparams.max_length = 0
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.layer_prepostprocess_dropout = 0.0
hparams.symbol_dropout = 0.1
hparams.add_hparam("attention_dropout", 0.0)
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer = "Adafactor"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 8000
hparams.initializer_gain = 1.0
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.shared_embedding_and_softmax_weights = False
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# we only want one data shard.
hparams.no_data_parallelism = True
# bypass the symbol modality so that we can use model parallelism.
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
hparams.add_hparam("filter_size", 512)
hparams.add_hparam("mix_fraction", 0.5)
# attention-related flags
hparams.add_hparam("multihead_attention_num_heads", 4)
hparams.add_hparam("multihead_attention_key_channels", 0)
hparams.add_hparam("multihead_attention_value_channels", 0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam(
"layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d")
# Number of model shards - each one has separate parameters.
# Changing this number invalidates checkpoints.
hparams.add_hparam("num_model_shards", 8)
hparams.add_hparam("diet_experts", False)
return hparams
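For reference, the "layers" string registered above is a comma-separated schedule of sublayer codes consumed by _super_stack; a small standalone sketch of how it expands:
spec = ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d"
layers = spec.strip(",").split(",")
print(len(layers))  # 43 sublayers
print(layers[:5])   # ['n', 'att', 'm', 'd', 'a']
# Codes (see _super_stack): n=norm, a=accumulate, d=dropout,
# m=mix across shards, att=single-head attention, ffn=feed-forward.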
|
Add mixture of experts with ~1B params.
def super_lm_moe():
"""Add mixture of experts with ~1B params."""
hparams = super_lm_base()
hparams.layers = (
("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d")
hparams.moe_num_experts = 32
hparams.moe_hidden_sizes = "1024"
return hparams
|
Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
def xmoe_tr_dense_2k():
"""Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
"""
hparams = mtf_transformer2.mtf_bitransformer_base()
hparams.encoder_layers = ["self_att", "drd"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
hparams.batch_size = 64
hparams.shared_embedding_and_softmax_weights = True
hparams.mesh_shape = "batch:8"
return hparams
|
Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
def xmoe_tr_1d():
"""Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
"""
hparams = xmoe_tr_dense_2k()
hparams.encoder_layers = ["self_att", "moe_1d"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4
hparams.layout = "batch:batch;experts:batch"
hparams.moe_hidden_size = 2048
hparams.moe_num_experts = 16
return hparams
|
Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
def xmoe_tr_2d():
"""Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
"""
hparams = xmoe_tr_dense_2k()
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.encoder_layers = ["self_att", "moe_2d"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "moe_2d"] * 4
hparams.moe_hidden_size = 2048
hparams.moe_experts_x = 4
hparams.moe_experts_y = 4
return hparams
|
Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams
def xmoe_dense_4k():
"""Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
# The following hparams are constant across all these experiments.
hparams.batch_size = 128
hparams.d_model = 512
hparams.d_kv = 128
hparams.num_heads = 4
hparams.decoder_layers = ["att", "drd"] * 4
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_schedule = "rsqrt_decay"
# We will vary the following parameters related to the ffn/moe layers.
hparams.d_ff = 4096
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:8"
return hparams
|
Mixture of experts (16 experts).
def xmoe_top_2():
"""Mixture of experts (16 experts)."""
hparams = xmoe_dense_4k()
moe.set_default_moe_hparams(hparams)
hparams.mesh_shape = "all:8"
hparams.layout = "batch:all;experts:all"
return hparams
|
Two-dimensional hierarchical mixture of 16 experts.
def xmoe_2d():
"""Two-dimensional hierarchical mixture of 16 experts."""
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams
|
Series of architectural experiments on language modeling.
Larger models than the ones above.
All models are trained on sequences of 1024 tokens.
We assume infinite training data, so no dropout necessary.
We process 2^36 tokens in training = 524288 steps at batch size 128
TODO(noam): find a large enough dataset for these experiments.
You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
(1 epoch = ~46000 steps) so training will cover about 11 epochs.
Note: configurations and code are likely to change without notice.
Run on TPU 4x4 for 524288 steps unless otherwise indicated.
Args:
sz: an integer
Returns:
a hparams
def xmoe2_dense(sz):
"""Series of architectural experiments on language modeling.
Larger models than the ones above.
All models are trained on sequences of 1024 tokens.
We assume infinite training data, so no dropout necessary.
We process 2^36 tokens in training = 524288 steps at batch size 128
TODO(noam): find a large enough dataset for these experiments.
You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
(1 epoch = ~46000 steps) so training will cover about 11 epochs.
Note: configurations and code are likely to change without notice.
Run on TPU 4x4 for 524288 steps unless otherwise indicated.
Args:
sz: an integer
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
hparams.batch_size = 128
hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
hparams.learning_rate_decay_steps = 65536
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
return hparams
|
Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchical moe layers.
Returns:
a hparams
def xmoe2_v1():
"""Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchical moe layers.
Returns:
a hparams
"""
hparams = xmoe2_dense(0)
moe.set_default_moe_hparams(hparams)
hparams.decoder_layers = (
["local_att", "local_att", "drd",
"att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1]
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.moe_hidden_size = 32768
hparams.mesh_shape = "b0:4;b1:8"
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.outer_batch_size = 4
hparams.moe_num_experts = [8, 4]
hparams.num_heads = 4
return hparams
|
128 experts, ~25B params - Train for 131072 steps on 8x8.
def xmoe2_v1_x128():
"""128 experts, ~25B params - Train for 131072 steps on 8x8."""
hparams = xmoe2_v1()
hparams.moe_num_experts = [16, 8]
hparams.outer_batch_size = 8
hparams.mesh_shape = "b0:8;b1:16"
hparams.batch_size = 512
hparams.learning_rate_decay_steps = 16384
return hparams
|
Test on local cpu.
def xmoe2_tiny():
"""Test on local cpu."""
hparams = xmoe2_v1()
hparams.decoder_layers = [
"local_att", "att", "compressed_att", "drd", "hmoe"]
hparams.d_model = 128
hparams.moe_hidden_size = 512
hparams.outer_batch_size = 0
hparams.batch_size = 2
hparams.mesh_shape = ""
hparams.activation_dtype = "float32"
return hparams
|
With sequence length 4096.
def xmoe2_v1_l4k():
"""With sequence length 4096."""
hparams = xmoe2_v1()
hparams.batch_size = 32
hparams.max_length = 4096
hparams.split_to_length = 4096
hparams.reshape_logits_hack = True
return hparams
|
With sequence length 4096.
def xmoe2_v1_l4k_local_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"local_att" if l == "att" else l for l in hparams.decoder_layers]
return hparams
|
With sequence length 4096.
def xmoe2_v1_l4k_global_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"att" if l == "local_att" else l for l in hparams.decoder_layers]
return hparams
|
With compressed attention.
def xmoe2_v1_l4k_compressed_c4():
"""With compressed attention."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"compressed_att" if l == "att" else l for l in hparams.decoder_layers]
hparams.compression_factor = 4
return hparams
|
Set of architectural experiments - language model on wikipedia on a 2x2.
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams
def wiki_2x2_base():
"""Set of architectural experiments - language model on wikipedia on a 2x2.
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.shared_embedding_and_softmax_weights = False
# no dropout - dataset is big enough to avoid overfitting.
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
# 4 sequences per core
hparams.batch_size = 32
# We don't use linear decay in these experiments, since we don't want
# a sharp jump in quality at the end of the training schedule.
# You can insert this once you find the right architecture.
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.mesh_shape = "all:8"
hparams.layout = "batch:all;experts:all"
# parameters for mixture-of-experts
moe.set_default_moe_hparams(hparams)
hparams.moe_num_experts = 16
hparams.moe_hidden_size = 8192
hparams.decoder_layers = ["att", "drd"] * 6
hparams.d_model = 1024
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.num_heads = 4
return hparams
|
Replace tokens instead of masking.
def denoise_z15():
"""Replace tokens instead of masking."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
hparams.noising_use_eval_during_train = 0.25
return hparams
|
Denoising experiment.
def denoise_v1_m15():
"""Denoising experiment."""
hparams = xmoe2_v1()
# no local attention
# TODO(noam): non-masked version of local-attention
hparams.decoder_layers = [
"att" if l == "local_att" else l for l in hparams.decoder_layers]
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
return hparams
|
Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
def _download_mlu_data(tmp_dir, data_dir):
"""Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
filename = os.path.basename(_URL)
file_path = os.path.join(tmp_dir, filename)
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/63.0.3239.132 Safari/537.36"}
resp = requests.get(_URL, headers=headers)
with open(file_path, "wb") as f:
f.write(resp.content)
with tarfile.open(file_path, "r:gz") as tar:
tar.extractall(tmp_dir)
return tmp_dir
|
Get a Counter with the ngrams of the given ID list.
Args:
ids: np.array or a list corresponding to a single sentence
n: n-gram size
Returns:
collections.Counter with ID tuples as keys and 1s as values.
def _get_ngram_counter(ids, n):
"""Get a Counter with the ngrams of the given ID list.
Args:
ids: np.array or a list corresponding to a single sentence
n: n-gram size
Returns:
collections.Counter with ID tuples as keys and 1s as values.
"""
# Remove zero IDs used to pad the sequence.
ids = [token_id for token_id in ids if token_id != 0]
ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
ngrams = set(ngram_list)
counts = collections.Counter()
for ngram in ngrams:
counts[ngram] = 1
return counts
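A small standalone check of the binary n-gram counting above (padding zeros dropped, each distinct n-gram counted once):
import collections
ids = [5, 7, 5, 7, 0, 0]   # trailing zeros are padding
n = 2
ids = [t for t in ids if t != 0]
ngrams = set(tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n))
counts = collections.Counter({g: 1 for g in ngrams})
# counts has (5, 7) -> 1 and (7, 5) -> 1; (5, 7) occurs twice but counts once.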
|
Compute Fbeta score.
Args:
true_positives: Number of true positive ngrams.
selected: Number of selected ngrams.
relevant: Number of relevant ngrams.
beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
Returns:
Fbeta score.
def _get_fbeta_score(true_positives, selected, relevant, beta=1):
"""Compute Fbeta score.
Args:
true_positives: Number of true positive ngrams.
selected: Number of selected ngrams.
relevant: Number of relevant ngrams.
beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
Returns:
Fbeta score.
"""
precision = 1
if selected > 0:
precision = true_positives / selected
if beta == 0:
return precision
recall = 1
if relevant > 0:
recall = true_positives / relevant
if precision > 0 and recall > 0:
beta2 = beta * beta
return (1 + beta2) * precision * recall / (beta2 * precision + recall)
else:
return 0
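A worked example, with the function above in scope: 2 true positives out of 4 selected and 3 relevant n-grams gives precision 0.5 and recall 2/3, so F1 = 4/7:
score = _get_fbeta_score(true_positives=2, selected=4, relevant=3, beta=1)
print(round(score, 3))  # 0.571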
|
Compute the addition score (Equation 4 in the paper).
def get_addition_score(source_counts, prediction_counts, target_counts):
"""Compute the addition score (Equation 4 in the paper)."""
added_to_prediction_counts = prediction_counts - source_counts
true_positives = sum((added_to_prediction_counts & target_counts).values())
selected = sum(added_to_prediction_counts.values())
# Note that in the paper the summation is done over all the ngrams in the
# output rather than the ngrams in the following set difference. Since the
# former does not make as much sense, we compute the latter, which is also done
# in the GitHub implementation.
relevant = sum((target_counts - source_counts).values())
return _get_fbeta_score(true_positives, selected, relevant)
|
Compute the keep score (Equation 5 in the paper).
def get_keep_score(source_counts, prediction_counts, target_counts):
"""Compute the keep score (Equation 5 in the paper)."""
source_and_prediction_counts = source_counts & prediction_counts
source_and_target_counts = source_counts & target_counts
true_positives = sum((source_and_prediction_counts &
source_and_target_counts).values())
selected = sum(source_and_prediction_counts.values())
relevant = sum(source_and_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant)
|
Compute the deletion score (Equation 6 in the paper).
def get_deletion_score(source_counts, prediction_counts, target_counts, beta=0):
"""Compute the deletion score (Equation 6 in the paper)."""
source_not_prediction_counts = source_counts - prediction_counts
source_not_target_counts = source_counts - target_counts
true_positives = sum((source_not_prediction_counts &
source_not_target_counts).values())
selected = sum(source_not_prediction_counts.values())
relevant = sum(source_not_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant, beta=beta)
|
Compute the SARI score for a single prediction and one or more targets.
Args:
source_ids: a list / np.array of SentencePiece IDs
prediction_ids: a list / np.array of SentencePiece IDs
list_of_targets: a list of target ID lists / np.arrays
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
beta_for_deletion: beta for deletion F score.
Returns:
the SARI score and its three components: add, keep, and deletion scores
def get_sari_score(source_ids, prediction_ids, list_of_targets,
max_gram_size=4, beta_for_deletion=0):
"""Compute the SARI score for a single prediction and one or more targets.
Args:
source_ids: a list / np.array of SentencePiece IDs
prediction_ids: a list / np.array of SentencePiece IDs
list_of_targets: a list of target ID lists / np.arrays
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
beta_for_deletion: beta for deletion F score.
Returns:
the SARI score and its three components: add, keep, and deletion scores
"""
addition_scores = []
keep_scores = []
deletion_scores = []
for n in range(1, max_gram_size + 1):
source_counts = _get_ngram_counter(source_ids, n)
prediction_counts = _get_ngram_counter(prediction_ids, n)
# All ngrams in the targets with count 1.
target_counts = collections.Counter()
# All ngrams in the targets with count r/num_targets, where r is the number
# of targets where the ngram occurs.
weighted_target_counts = collections.Counter()
num_nonempty_targets = 0
for target_ids_i in list_of_targets:
target_counts_i = _get_ngram_counter(target_ids_i, n)
if target_counts_i:
weighted_target_counts += target_counts_i
num_nonempty_targets += 1
for gram in weighted_target_counts.keys():
weighted_target_counts[gram] /= num_nonempty_targets
target_counts[gram] = 1
keep_scores.append(get_keep_score(source_counts, prediction_counts,
weighted_target_counts))
deletion_scores.append(get_deletion_score(source_counts, prediction_counts,
weighted_target_counts,
beta_for_deletion))
addition_scores.append(get_addition_score(source_counts, prediction_counts,
target_counts))
avg_keep_score = sum(keep_scores) / max_gram_size
avg_addition_score = sum(addition_scores) / max_gram_size
avg_deletion_score = sum(deletion_scores) / max_gram_size
sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0
return sari, avg_keep_score, avg_addition_score, avg_deletion_score
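A small usage sketch with made-up token IDs (the integer IDs here are arbitrary; only the relative overlap between source, prediction, and targets matters):

import numpy as np

source_ids = [4, 11, 11, 7, 9]
prediction_ids = [4, 11, 7, 9]
list_of_targets = [np.array([4, 11, 7, 9]), np.array([4, 7, 9])]

sari, keep, add, delete = get_sari_score(
    source_ids, prediction_ids, list_of_targets,
    max_gram_size=2, beta_for_deletion=0)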
|
Computes the SARI scores from the given source, prediction and targets.
Args:
source_ids: A 2D tf.Tensor of size (batch_size, sequence_length)
prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length)
target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets,
sequence_length)
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
Returns:
A 4-tuple of 1D float Tensors of size (batch_size) for the SARI score and
the keep, addition and deletion scores.
def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4):
"""Computes the SARI scores from the given source, prediction and targets.
Args:
    source_ids: A 2D tf.Tensor of size (batch_size, sequence_length)
prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length)
target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets,
sequence_length)
max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
bigrams, and trigrams)
Returns:
A 4-tuple of 1D float Tensors of size (batch_size) for the SARI score and
the keep, addition and deletion scores.
"""
def get_sari_numpy(source_ids, prediction_ids, target_ids):
"""Iterate over elements in the batch and call the SARI function."""
sari_scores = []
keep_scores = []
add_scores = []
deletion_scores = []
# Iterate over elements in the batch.
for source_ids_i, prediction_ids_i, target_ids_i in zip(
source_ids, prediction_ids, target_ids):
sari, keep, add, deletion = get_sari_score(
source_ids_i, prediction_ids_i, target_ids_i, max_gram_size,
BETA_FOR_SARI_DELETION_F_MEASURE)
sari_scores.append(sari)
keep_scores.append(keep)
add_scores.append(add)
deletion_scores.append(deletion)
return (np.asarray(sari_scores), np.asarray(keep_scores),
np.asarray(add_scores), np.asarray(deletion_scores))
sari, keep, add, deletion = tf.py_func(
get_sari_numpy,
[source_ids, prediction_ids, target_ids],
[tf.float64, tf.float64, tf.float64, tf.float64])
return sari, keep, add, deletion
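A sketch of calling the graph-mode wrapper directly, using TF1-style session execution to match the tf.py_func usage above; shapes are (batch, length) for source and prediction and (batch, num_targets, length) for targets:

import tensorflow as tf

source = tf.constant([[4, 11, 11, 7, 9]], dtype=tf.int64)
prediction = tf.constant([[4, 11, 7, 9, 0]], dtype=tf.int64)
targets = tf.constant([[[4, 11, 7, 9, 0]]], dtype=tf.int64)

sari, keep, add, delete = get_sari(source, prediction, targets)
with tf.Session() as sess:
  print(sess.run([sari, keep, add, delete]))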
|
Computes the SARI scores from the given source, prediction and targets.
An approximate SARI scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4.
Also, this does not have beam search.
Args:
predictions: tensor, model predictions.
labels: tensor, gold output.
features: dict, containing inputs.
Returns:
sari: approx sari score (a float Tensor) and a constant weight of 1.0
def sari_score(predictions, labels, features, **unused_kwargs):
"""Computes the SARI scores from the given source, prediction and targets.
An approximate SARI scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4.
Also, this does not have beam search.
Args:
predictions: tensor, model predictions.
labels: tensor, gold output.
features: dict, containing inputs.
Returns:
    sari: approx sari score (a float Tensor) and a constant weight of 1.0
"""
if "inputs" not in features:
raise ValueError("sari_score requires inputs feature")
# Convert the inputs and outputs to a [batch_size, sequence_length] tensor.
inputs = tf.squeeze(features["inputs"], axis=[-1, -2])
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
outputs = tf.squeeze(outputs, axis=[-1, -2])
# Convert the labels to a [batch_size, 1, sequence_length] tensor.
labels = tf.squeeze(labels, axis=[-1, -2])
labels = tf.expand_dims(labels, axis=1)
score, _, _, _ = get_sari(inputs, outputs, labels)
return score, tf.constant(1.0)
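A rough sketch of feeding the metric with T2T-style shapes, where inputs and labels carry trailing singleton axes and predictions carry a final vocab axis; this only illustrates the shape convention implied by the squeezes above, not the full estimator wiring:

import numpy as np
import tensorflow as tf

batch, length, vocab = 2, 5, 16
features = {"inputs": tf.constant(
    np.random.randint(1, vocab, size=(batch, length, 1, 1)), dtype=tf.int32)}
labels = tf.constant(
    np.random.randint(1, vocab, size=(batch, length, 1, 1)), dtype=tf.int32)
predictions = tf.constant(
    np.random.rand(batch, length, 1, 1, vocab), dtype=tf.float32)

score, weight = sari_score(predictions, labels, features)
with tf.Session() as sess:
  print(sess.run([score, weight]))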
|
Download all MNIST files to directory unless they are there.
def _get_mnist(directory):
"""Download all MNIST files to directory unless they are there."""
for filename in [
_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
_MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
]:
generator_utils.maybe_download(directory, filename, _MNIST_URL + filename)
|
Extract images from an MNIST file into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
def _extract_mnist_images(filename, num_images):
"""Extract images from an MNIST file into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1)
return data
|
Extract labels from an MNIST file into integers.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
An int64 numpy array of shape [num_labels]
def _extract_mnist_labels(filename, num_labels):
"""Extract labels from an MNIST file into integers.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
    An int64 numpy array of shape [num_labels]
"""
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
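A minimal sketch combining the download and extraction helpers; the filename constants are the module-level ones used above, and /tmp/mnist is just a hypothetical scratch directory:

import os

tmp_dir = "/tmp/mnist"
_get_mnist(tmp_dir)  # fetches the four gzipped IDX files if missing
train_images = _extract_mnist_images(
    os.path.join(tmp_dir, _MNIST_TRAIN_DATA_FILENAME), 60000)
train_labels = _extract_mnist_labels(
    os.path.join(tmp_dir, _MNIST_TRAIN_LABELS_FILENAME), 60000)
# train_images: uint8 array of shape (60000, 28, 28, 1); train_labels: (60000,)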
|
Image generator for MNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
data_filename: file that contains features data.
label_filename: file that contains labels.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
def mnist_common_generator(tmp_dir,
training,
how_many,
data_filename,
label_filename,
start_from=0):
"""Image generator for MNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
data_filename: file that contains features data.
label_filename: file that contains labels.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
"""
data_path = os.path.join(tmp_dir, data_filename)
labels_path = os.path.join(tmp_dir, label_filename)
images = _extract_mnist_images(data_path, 60000 if training else 10000)
labels = _extract_mnist_labels(labels_path, 60000 if training else 10000)
# Shuffle the data to make sure classes are well distributed.
data = list(zip(images, labels))
random.shuffle(data)
images, labels = list(zip(*data))
return image_utils.image_generator(images[start_from:start_from + how_many],
labels[start_from:start_from + how_many])
|
Image generator for MNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
def mnist_generator(tmp_dir, training, how_many, start_from=0):
"""Image generator for MNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
"""
_get_mnist(tmp_dir)
d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME
l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME
return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
|
Download all FashionMNIST files to directory unless they are there.
def _get_fashion_mnist(directory):
"""Download all FashionMNIST files to directory unless they are there."""
# Fashion mnist files have the same names as MNIST.
# We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir.
for filename in [
_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
_MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
]:
generator_utils.maybe_download(directory,
_FASHION_MNIST_LOCAL_FILE_PREFIX + filename,
_FASHION_MNIST_URL + filename)
|
Image generator for FashionMNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
"""Image generator for FashionMNIST.
Args:
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces MNIST images.
"""
_get_fashion_mnist(tmp_dir)
d = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
_MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME)
l = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
_MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME)
return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
|
Generates synthetic timeseries using input parameters.
Each generated timeseries has timeseries_length data points.
Parameters for each timeseries are specified by timeseries_params.
Args:
timeseries_length: Number of data points to generate for each timeseries.
timeseries_params: Parameters used to generate the timeseries. The following
parameters need to be specified for each timeseries:
m = Slope of the timeseries used to compute the timeseries trend.
b = y-intercept of the timeseries used to compute the timeseries trend.
A = Timeseries amplitude used to compute timeseries period.
freqcoeff = Frequency coefficient used to compute timeseries period.
rndA = Random amplitude used to inject noise into the timeseries.
fn = Base timeseries function (np.cos or np.sin).
Example params for two timeseries.
[{"m": 0.006, "b": 300.0, "A":50.0, "freqcoeff":1500.0, "rndA":15.0,
"fn": np.sin},
{"m": 0.000, "b": 500.0, "A":35.0, "freqcoeff":3500.0, "rndA":25.0,
"fn": np.cos}]
Returns:
Multi-timeseries (list of list).
def generate_data(timeseries_length, timeseries_params):
"""Generates synthetic timeseries using input parameters.
Each generated timeseries has timeseries_length data points.
Parameters for each timeseries are specified by timeseries_params.
Args:
timeseries_length: Number of data points to generate for each timeseries.
timeseries_params: Parameters used to generate the timeseries. The following
parameters need to be specified for each timeseries:
m = Slope of the timeseries used to compute the timeseries trend.
b = y-intercept of the timeseries used to compute the timeseries trend.
A = Timeseries amplitude used to compute timeseries period.
freqcoeff = Frequency coefficient used to compute timeseries period.
rndA = Random amplitude used to inject noise into the timeseries.
fn = Base timeseries function (np.cos or np.sin).
Example params for two timeseries.
[{"m": 0.006, "b": 300.0, "A":50.0, "freqcoeff":1500.0, "rndA":15.0,
"fn": np.sin},
{"m": 0.000, "b": 500.0, "A":35.0, "freqcoeff":3500.0, "rndA":25.0,
"fn": np.cos}]
Returns:
Multi-timeseries (list of list).
"""
x = range(timeseries_length)
multi_timeseries = []
for p in timeseries_params:
# Trend
y1 = [p["m"] * i + p["b"] for i in x]
# Period
y2 = [p["A"] * p["fn"](i / p["freqcoeff"]) for i in x]
# Noise
y3 = np.random.normal(0, p["rndA"], timeseries_length).tolist()
# Sum of Trend, Period and Noise. Replace negative values with zero.
y = [max(a + b + c, 0) for a, b, c in zip(y1, y2, y3)]
multi_timeseries.append(y)
return multi_timeseries
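A usage sketch with the two example parameter dicts from the docstring:

import numpy as np

params = [
    {"m": 0.006, "b": 300.0, "A": 50.0, "freqcoeff": 1500.0, "rndA": 15.0,
     "fn": np.sin},
    {"m": 0.000, "b": 500.0, "A": 35.0, "freqcoeff": 3500.0, "rndA": 25.0,
     "fn": np.cos},
]
series = generate_data(timeseries_length=100, timeseries_params=params)
# series is a list of 2 lists, each with 100 non-negative floats.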
|
Basic 2-frame conv model with stochastic tower.
def next_frame_basic_stochastic():
"""Basic 2-frame conv model with stochastic tower."""
hparams = basic_deterministic_params.next_frame_basic_deterministic()
hparams.stochastic_model = True
hparams.add_hparam("latent_channels", 1)
hparams.add_hparam("latent_std_min", -5.0)
hparams.add_hparam("num_iterations_1st_stage", 15000)
hparams.add_hparam("num_iterations_2nd_stage", 15000)
hparams.add_hparam("latent_loss_multiplier", 1e-3)
hparams.add_hparam("latent_loss_multiplier_dynamic", False)
hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames.
hparams.add_hparam("anneal_end", 50000)
hparams.add_hparam("information_capacity", 0.0)
return hparams
|
Basic 2-frame conv model with stochastic tower.
def next_frame_sampling_stochastic():
"""Basic 2-frame conv model with stochastic tower."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.stochastic_model = True
hparams.add_hparam("latent_channels", 1)
hparams.add_hparam("latent_std_min", -5.0)
hparams.add_hparam("num_iterations_1st_stage", 15000)
hparams.add_hparam("num_iterations_2nd_stage", 15000)
hparams.add_hparam("latent_loss_multiplier", 1e-3)
hparams.add_hparam("latent_loss_multiplier_dynamic", False)
hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames.
hparams.add_hparam("anneal_end", 40000)
hparams.add_hparam("information_capacity", 0.0)
return hparams
|
Basic 2-frame conv model with stochastic discrete latent.
def next_frame_basic_stochastic_discrete():
"""Basic 2-frame conv model with stochastic discrete latent."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.batch_size = 4
hparams.video_num_target_frames = 6
hparams.scheduled_sampling_mode = "prob_inverse_lin"
hparams.scheduled_sampling_decay_steps = 40000
hparams.scheduled_sampling_max_prob = 1.0
hparams.dropout = 0.15
hparams.filter_double_steps = 3
hparams.hidden_size = 96
hparams.learning_rate_constant = 0.002
hparams.learning_rate_warmup_steps = 2000
hparams.learning_rate_schedule = "linear_warmup * constant"
hparams.concat_internal_states = True
hparams.video_modality_loss_cutoff = 0.03
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.1)
hparams.add_hparam("discretize_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_max_sampling", 0.5)
hparams.add_hparam("latent_use_max_probability", 0.8)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 1.0)
hparams.add_hparam("complex_addn", True)
hparams.add_hparam("recurrent_state_size", 64)
return hparams
|
Next frame stochastic discrete tuning grid.
def next_frame_stochastic_discrete_range(rhp):
"""Next frame stochastic discrete tuning grid."""
rhp.set_float("learning_rate_constant", 0.001, 0.01)
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_int("filter_double_steps", 3, 5)
rhp.set_discrete("hidden_size", [64, 96, 128])
rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256])
rhp.set_discrete("video_num_target_frames", [4])
rhp.set_float("bottleneck_noise", 0.0, 0.2)
|
Map the function f to the nested structure x (dicts, tuples, lists).
def nested_map(x, f):
"""Map the function f to the nested structure x (dicts, tuples, lists)."""
if isinstance(x, list):
return [nested_map(y, f) for y in x]
if isinstance(x, tuple):
return tuple([nested_map(y, f) for y in x])
if isinstance(x, dict):
return {k: nested_map(x[k], f) for k in x}
return f(x)
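A quick example of mapping a function over a mixed nested structure; lists, tuples, and dicts are rebuilt and the function is applied to every leaf:

tree = {"a": [1, 2, (3, 4)], "b": {"c": 5}}
doubled = nested_map(tree, lambda v: v * 2)
# {'a': [2, 4, (6, 8)], 'b': {'c': 10}}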
|
Get a structure of shapes for a structure of nested arrays.
def shapes(x):
"""Get a structure of shapes for a structure of nested arrays."""
def shape(x):
try:
return x.shape
except Exception: # pylint: disable=broad-except
return []
return nested_map(x, shape)
|
Get a structure of sizes for a structure of nested arrays.
def sizes(x):
"""Get a structure of sizes for a structure of nested arrays."""
def size(x):
try:
return x.size
except Exception: # pylint: disable=broad-except
return 0
return nested_map(x, size)
|
Find the frame with the caller on the stack.
def _find_frame(stack, start=0):
"""Find the frame with the caller on the stack."""
# We want to find the first place where the layer was called
# that is *not* an __init__ function of an inheriting layer.
frame = inspect.getframeinfo(stack[start][0])
# If we are in an init, move on.
if frame.function == '__init__':
return _find_frame(stack, start + 1)
return frame
|
Shorten file path in error lines for more readable tracebacks.
def _shorten_file_path(line):
"""Shorten file path in error lines for more readable tracebacks."""
start = line.lower().find('file')
if start < 0:
return line
first_quote = line.find('"', start)
if first_quote < 0:
return line
second_quote = line.find('"', first_quote + 1)
if second_quote < 0:
return line
path = line[first_quote + 1:second_quote]
new_path = '/'.join(path.split('/')[-3:])
return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
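For example, applied to a typical traceback line (the path is hypothetical), only the last three path components are kept and the surrounding quotes are dropped:

line = '  File "/usr/local/lib/python3.7/site-packages/trax/layers/base.py", line 42'
print(_shorten_file_path(line))
# ->   File [...]/trax/layers/base.py, line 42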
|
Cleaned-up form of traceback.
def _short_traceback(skip=3):
"""Cleaned-up form of traceback."""
counter, res = 0, []
# Skipping 3 lines by default: the top (useless) and self-call.
lines = traceback.format_exc().splitlines()[skip:]
for l in lines:
res.append(_shorten_file_path(l))
if counter % 2 == 1:
res.append('')
counter += 1
# If we see a LayerError, the traceback has already been processed.
if l.startswith('LayerError'):
# Skip 4 back except last as these are internal base-layer calls.
res = res[:-4] + [res[-1]]
res += lines[counter:]
break
return '\n'.join(res)
|
Create a layer class from a function.
def layer(output_shape=None, new_parameters=None):
"""Create a layer class from a function."""
def layer_decorator(call):
"""Decorating the call function."""
def output_shape_fun(self, input_shape):
if output_shape is None:
return input_shape
kwargs = self._init_kwargs # pylint: disable=protected-access
return output_shape(input_shape, **kwargs)
def new_parameters_fun(self, input_shape, rng):
if new_parameters is None:
return ()
kwargs = self._init_kwargs # pylint: disable=protected-access
return new_parameters(input_shape, rng, **kwargs)
def call_fun(self, x, params=(), **kwargs):
"""The call function of the created class, derived from call."""
# Merge on-call kwargs with class-kwargs.
call_kwargs = kwargs.copy()
call_kwargs.update(self._init_kwargs) # pylint: disable=protected-access
# Call with the merged kwargs.
return call(x, params=params, **call_kwargs)
# Set doc for python help.
call_fun.__doc__ = call.__doc__
    if output_shape is not None:
      output_shape_fun.__doc__ = output_shape.__doc__
    if new_parameters is not None:
      new_parameters_fun.__doc__ = new_parameters.__doc__
# Create the class.
cls = type(call.__name__, (Layer,),
{'call': call_fun,
'output_shape': output_shape_fun,
'new_parameters': new_parameters_fun})
return cls
return layer_decorator
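A sketch of how the decorator is meant to be used, assuming the Layer base class in this module provides the usual constructor bookkeeping (it stores constructor kwargs in _init_kwargs); the decorated function becomes the call method of a generated Layer subclass:

import numpy as np

@layer()
def Relu(x, **unused_kwargs):
  """Elementwise ReLU; stateless, so params land in unused_kwargs."""
  return np.maximum(x, 0.)

relu = Relu()                          # instance of the generated Layer subclass
out = relu.call(np.array([-1., 2.]))   # array([0., 2.])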
|
Initialize the layer given an input shape and rng.
Returns new_parameters(input_shape, rng) on the first call and () on any
subsequent call, as the layer is already initialized. This is used for
networks that share parameters, so the layer only produces them once.
Note that all arguments and return values can be tuples or dictionaries
or arbitrary nested structures composed of tuples and dictionaries.
Args:
input_shape: a tuple representing the shape of the input.
rng: random number generator.
Returns:
Newly created parameters on the first call and () on all subsequent calls.
def initialize(self, input_shape, rng):
"""Initialize the layer given an input shape and rng.
Returns new_parameters(input_shape, rng) on the first call and () on any
subsequent call, as the layer is already initialized. This is used for
networks that share parameters, so the layer only produces them once.
Note that all arguments and return values can be tuples or dictionaries
  or arbitrary nested structures composed of tuples and dictionaries.
Args:
input_shape: a tuple representing the shape of the input.
rng: random number generator.
Returns:
Newly created parameters on the first call and () on all subsequent calls.
"""
try:
# Re-using this layer, no new parameters.
if not self._first_init:
return ()
# First call of this layer, create parameters.
self._first_init = False
self._params = self.new_parameters(input_shape, rng)
return self._params
except Exception:
name, trace = self.__class__.__name__, _short_traceback()
raise LayerError(name, 'initialize', self._caller, input_shape, trace)
|
Returns dict<str ref_url, str ref_content>.
def _references_content(ref_files):
"""Returns dict<str ref_url, str ref_content>."""
example_spec = {
"url": tf.FixedLenFeature([], tf.string),
"content": tf.FixedLenFeature([], tf.string),
}
data = {}
for ex in generator_utils.tfrecord_iterator(
ref_files, gzipped=True, example_spec=example_spec):
data[ex["url"]] = text_encoder.to_unicode(ex["content"])
return data
|
Urls for chunk: dict<str wiki_url, list<str> ref_urls>.
def _wiki_urls_for_shard(shard_id, urls_dir=None):
"""Urls for chunk: dict<str wiki_url, list<str> ref_urls>."""
urls_dir = urls_dir or WIKI_URLS_DIR
urls_filepath = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id)
with tf.gfile.GFile(urls_filepath) as f:
return json.loads(f.read())
|
Generates WikipediaArticles from GCS that are part of shard shard_id.
def _wiki_articles(shard_id, wikis_dir=None):
"""Generates WikipediaArticles from GCS that are part of shard shard_id."""
if not wikis_dir:
wikis_dir = WIKI_CONTENT_DIR
with tf.Graph().as_default():
dataset = tf.data.TFRecordDataset(
cc_utils.readahead(
os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)),
buffer_size=16 * 1000 * 1000)
def _parse_example(ex_ser):
"""Parse serialized Example containing Wikipedia article content."""
features = {
"url": tf.VarLenFeature(tf.string),
"title": tf.VarLenFeature(tf.string),
"section_titles": tf.VarLenFeature(tf.string),
"section_texts": tf.VarLenFeature(tf.string),
}
ex = tf.parse_single_example(ex_ser, features)
for k in ex.keys():
ex[k] = ex[k].values
ex["url"] = ex["url"][0]
ex["title"] = ex["title"][0]
return ex
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
except tf.errors.OutOfRangeError:
break
sections = [
WikipediaSection(title=text_encoder.to_unicode(title),
text=text_encoder.to_unicode(text))
for title, text in zip(ex["section_titles"], ex["section_texts"])
]
yield WikipediaArticle(
url=text_encoder.to_unicode(ex["url"]),
title=text_encoder.to_unicode(ex["title"]),
sections=sections)
|
Rank and return reference paragraphs by tf-idf score on title tokens.
def rank_reference_paragraphs(wiki_title, references_content, normalize=True):
"""Rank and return reference paragraphs by tf-idf score on title tokens."""
normalized_title = _normalize_text(wiki_title)
title_tokens = _tokens_to_score(
set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title))))
ref_paragraph_info = []
doc_counts = collections.defaultdict(int)
for ref in references_content:
for paragraph in ref.split("\n"):
normalized_paragraph = _normalize_text(paragraph)
if cc_utils.filter_paragraph(normalized_paragraph):
# Skip paragraph
continue
counts = _token_counts(normalized_paragraph, title_tokens)
for token in title_tokens:
if counts[token]:
doc_counts[token] += 1
content = normalized_paragraph if normalize else paragraph
info = {"content": content, "counts": counts}
ref_paragraph_info.append(info)
for info in ref_paragraph_info:
score = 0.
for token in title_tokens:
term_frequency = info["counts"][token]
inv_doc_frequency = (
float(len(ref_paragraph_info)) / max(doc_counts[token], 1))
score += term_frequency * math.log(inv_doc_frequency)
info["score"] = score
ref_paragraph_info.sort(key=lambda el: el["score"], reverse=True)
return [info["content"] for info in ref_paragraph_info]
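A toy sketch of the ranking (the reference strings are invented; the exact ordering also depends on the _normalize_text, _tokens_to_score, and cc_utils.filter_paragraph helpers defined elsewhere in the module):

refs = [
    "Solar power is the conversion of energy from sunlight into electricity.\n"
    "Bananas are an elongated fruit produced by several kinds of large plants.",
    "Solar power plants use mirrors or panels to collect the energy of the sun.",
]
ranked = rank_reference_paragraphs("Solar power", refs)
# Paragraphs containing the title tokens ("solar", "power") score highest
# under tf-idf, so they should come first in the returned list.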
|
Produce examples from shard_ids to out_filepaths.
def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path,
out_filepaths):
"""Produce examples from shard_ids to out_filepaths."""
# * Join the Wikipedia articles with their references
# * Run Tf-idf to sort reference paragraphs
# * Encode the Wikipedia and reference text with the vocabulary
# * Write out TFRecords of tensorflow.Example
tf.logging.info("Processing %d input shards into %d output files.",
len(shard_ids), len(out_filepaths))
vocab = text_encoder.SubwordTextEncoder(vocab_path)
eot_ids = vocab.encode(EOT)
def example_generator():
"""Generate Example dicts."""
stats = dict(total_original_wikis=0, total_original_refs=0,
total_found_refs=0, ref_lengths=[], wiki_original_refs=[],
wiki_found_refs=[], wikis_skipped_no_refs=0,
wikis_skipped_short_lead=0, num_wikis_written=0)
ref_files_by_shard = _references_files_by_shard(refs_dir)
for shard_id in shard_ids:
tf.logging.info("Processing shard %d", shard_id)
wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir)
tf.logging.info("Loaded wiki URLs for shard")
refs_content = _references_content(ref_files_by_shard[shard_id])
tf.logging.info("Loaded reference content for shard")
for i, wiki in enumerate(_wiki_articles(shard_id, wikis_dir)):
if not i % 1000:
tf.logging.info("Processing wiki index %d for shard %d", i, shard_id)
stats["total_original_wikis"] += 1
# Get reference content
wiki_ref_content = []
ref_urls = wiki_urls[wiki.url]["refs"]
stats["total_original_refs"] += len(ref_urls)
stats_wiki_original_refs = len(ref_urls)
stats_wiki_found_refs = 0
for ref_url in ref_urls:
ref_content = refs_content.get(ref_url)
if not ref_content:
continue
stats["total_found_refs"] += 1
stats["ref_lengths"].append(len(ref_content))
stats_wiki_found_refs += 1
wiki_ref_content.append(ref_content)
stats["wiki_original_refs"].append(stats_wiki_original_refs)
stats["wiki_found_refs"].append(stats_wiki_found_refs)
if not wiki_ref_content or len(wiki_ref_content) < _MIN_REFS:
# No/few refs were found
stats["wikis_skipped_no_refs"] += 1
continue
# Rank reference paragraphs with TFIDF
wiki_title = _normalize_text(wiki.title)
ranked_paragraphs = rank_reference_paragraphs(wiki_title,
wiki_ref_content)
# Construct inputs from Wiki title and references
inputs = []
inputs.extend(vocab.encode(wiki_title))
inputs.extend(eot_ids)
for paragraph in ranked_paragraphs:
if len(inputs) >= 1e6:
break
paragraph += " "
inputs.extend(vocab.encode(paragraph))
# Construct targets from article sections
targets, section_boundaries = _encode_wiki_sections(
wiki.sections, vocab)
# Skip if lead section is too short
if (not section_boundaries or
section_boundaries[0] < _MIN_LEADSECTION_TOKENS):
stats["wikis_skipped_short_lead"] += 1
continue
inputs.append(text_encoder.EOS_ID)
targets.append(text_encoder.EOS_ID)
stats["num_wikis_written"] += 1
yield {
"inputs": inputs,
"targets": targets,
"section_boundaries": section_boundaries,
}
tf.logging.info("Total: %d, Skipped: %d",
stats["num_wikis_written"],
stats["total_original_wikis"] - stats["num_wikis_written"])
tf.logging.info("Total refs: %d, Skipped refs: %d",
stats["total_found_refs"],
stats["total_original_refs"] - stats["total_found_refs"])
stats_fname = os.path.join(os.path.split(out_filepaths[0])[0],
"stats.%d.json" % shard_ids[0])
with tf.gfile.Open(stats_fname, "w") as f:
f.write(json.dumps(stats))
generator_utils.generate_files(example_generator(), out_filepaths)
|
Encodes sections with vocab. Returns ids and section boundaries.
def _encode_wiki_sections(sections, vocab):
"""Encodes sections with vocab. Returns ids and section boundaries."""
ids = []
section_boundaries = []
for i, section in enumerate(sections):
if i > 0:
# Skip including article title
ids.extend(vocab.encode(_format_title(_normalize_text(section.title))))
ids.extend(vocab.encode(_normalize_text(section.text)))
section_boundaries.append(len(ids))
return ids, section_boundaries
|
Extract references from WET files into sharded output files.
def extract_references_from_wets(wet_files, metadata_dir, out_dir,
tmp_dir=None):
"""Extract references from WET files into sharded output files."""
# Setup output files
shard_files = make_ref_shard_files(out_dir)
num_refs = 0
for i, wet_file in enumerate(wet_files):
num_refs_in_wet = 0
tf.logging.info("Processing file %d", i)
# Read metadata file
metadata_fname = os.path.join(
metadata_dir, os.path.basename(wet_file)) + cc_utils.METADTA_SUFFIX
with tf.gfile.Open(cc_utils.readahead(metadata_fname)) as f:
wet_metadata = json.loads(f.read())
if not wet_metadata:
# No references in this WET file
continue
if wet_file.startswith("http"):
# download
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
record_gen = cc_utils.wet_records_from_url(wet_file, tmp_dir)
else:
# local
record_gen = cc_utils.wet_records_from_file_obj(
cc_utils.gzip_memfile(wet_file), take_ownership=True)
for wet_record in record_gen:
shard_ids = wet_metadata.get(wet_record.url)
if not shard_ids:
# URL not in dataset
continue
# Serialize and write out
ex = _make_example_from_record(wet_record)
ex_str = ex.SerializeToString()
for shard_id in shard_ids:
shard_files[shard_id].write(ex_str)
num_refs += 1
num_refs_in_wet += 1
tf.logging.info("Wrote out %d references for this WET", num_refs_in_wet)
tf.logging.info("Wrote out %d references total", num_refs)
# Cleanup
for shard_file in shard_files:
shard_file.close()
|
Extract pages from an xml dump.
Args:
dump: a unicode string
Returns:
a list of unicode strings
def _dump_to_pages(dump):
"""Extract pages from an xml dump.
Args:
dump: a unicode string
Returns:
a list of unicode strings
"""
pos = 0
ret = []
start_tag = u"<page>\n"
end_tag = u"</page>\n"
while True:
start_pos = dump.find(start_tag, pos)
if start_pos == -1:
break
start_pos += len(start_tag)
end_pos = dump.find(end_tag, start_pos)
if end_pos == -1:
break
ret.append(dump[start_pos:end_pos])
pos = end_pos + len(end_tag)
return ret
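For instance, on a tiny two-page dump string the helper returns the text between each pair of page tags:

dump = (u"<page>\n  <title>First</title>\n</page>\n"
        u"<page>\n  <title>Second</title>\n</page>\n")
pages = _dump_to_pages(dump)
# ['  <title>First</title>\n', '  <title>Second</title>\n']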
|