def bit_to_int(x_bit, num_bits, base=2):
  """Turn x_bit representing numbers bitwise (lower-endian) to int tensor.

  Args:
    x_bit: Tensor containing numbers in a particular base to be converted to
      int.
    num_bits: Number of bits in the representation.
    base: Base of the representation.

  Returns:
    Integer representation of this number.
  """
  x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))
  x_labels = [
      x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)]
  res = sum(x_labels)
  return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1]))
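
# Illustrative usage sketch (assumption: TF 1.x graph mode, since the module
# relies on tf.to_int32 and tf.Session). bit_to_int folds a lower-endian bit
# tensor back into integers; the companion int_to_bit, defined elsewhere in
# this module, performs the inverse mapping.
def _example_bit_to_int():
  # 6 = 0*1 + 1*2 + 1*4 in lower-endian base 2.
  bits = tf.constant([[0.0, 1.0, 1.0]])
  ints = bit_to_int(bits, num_bits=3, base=2)
  with tf.Session() as sess:
    return sess.run(ints)  # -> array([6], dtype=int32)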

def int_to_bit_embed(x_int, num_bits, embedding_size, base=2):
  """Turn x_int into a bitwise (lower-endian) tensor and embed densely."""
  shape = common_layers.shape_list(x_int)
  inputs = int_to_bit(x_int, num_bits, base=base)
  inputs = tf.reshape(inputs, shape[:-1] + [shape[-1] * 8])
  inputs = 2.0 * tf.to_float(inputs) - 1.0  # Move from 0/1 to -1/1.
  return tf.layers.dense(inputs, embedding_size, name="int_to_bit_embed")

def embed(x,
          hidden_size,
          z_size,
          filter_size,
          bottleneck_kind="dvq",
          soft_em=False,
          num_blocks=2,
          num_residuals=1,
          block_v_size=None,
          means=None,
          name=None):
  """Embedding function that takes discrete latent and returns embedding.

  Args:
    x: Input to the discretization bottleneck.
    hidden_size: Dimension of the latent state.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    filter_size: Dimension to project embedding by. Used only if
      bottleneck_kind is semhash.
    bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,
      semhash, gumbel-softmax (Default: dvq).
    soft_em: If True then it uses a multi-sample version of EM
      (Default: False).
    num_blocks: Number of blocks in DVQ (Default: 2).
    num_residuals: Number of residuals (Default: 1).
    block_v_size: Number of embedding entries per block (Default: None).
    means: The embedding table for dvq (Default: None).
    name: Name for the bottleneck scope.

  Returns:
    Continuous embedding to be passed on to the decoder.

  Raises:
    ValueError: For unknown or missing arguments.
  """
  with tf.variable_scope(name, default_name="embed", reuse=tf.AUTO_REUSE):
    if bottleneck_kind == "semhash":
      c = int_to_bit(x, z_size)
      h1a = tf.layers.dense(c, filter_size, name="vch1a")
      h1b = tf.layers.dense(1.0 - c, filter_size, name="vch1b")
      h1 = h1a + h1b
    elif bottleneck_kind == "gumbel-softmax":
      hot = tf.one_hot(x, 2**z_size)
      h1 = tf.layers.dense(hot, hidden_size, name="dae_dense")
    elif bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]:
      if block_v_size is None:
        raise ValueError("Bottleneck kind is dvq but block_v_size is None.")

      if soft_em:
        assert num_residuals == 1
        x_hot_flat = tf.reshape(x, shape=[-1, num_blocks, block_v_size])
        h1 = tf.matmul(tf.transpose(x_hot_flat, perm=[1, 0, 2]), means[0])
        h1 = tf.transpose(h1, perm=[1, 0, 2])
        new_shape = common_layers.shape_list(x)
        new_shape[-1] = hidden_size
        h1 = tf.reshape(h1, shape=new_shape)
      else:
        shape_x = common_layers.shape_list(x)
        x_flat = tf.reshape(x, [-1, 1])
        c = int_to_bit(x_flat, num_bits=z_size, base=2)
        shape = common_layers.shape_list(c)
        new_shape = shape
        new_shape[-1] = num_residuals
        new_shape.append(num_blocks)
        new_shape.append(int(z_size / (num_residuals * num_blocks)))
        c = tf.to_int32(tf.reshape(c, shape=new_shape))
        h1_shape = shape_x
        h1_shape.append(hidden_size)
        h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)
        for i in range(num_residuals):
          c_residual = bit_to_int(
              c[:, :, i, :, :],
              num_bits=int(z_size / (num_residuals * num_blocks)),
              base=2)
          c_hot = tf.one_hot(c_residual, depth=block_v_size, axis=-1)
          c_hot_flat = tf.reshape(c_hot, shape=[-1, num_blocks, block_v_size])
          h1_residual = tf.matmul(
              tf.transpose(c_hot_flat, perm=[1, 0, 2]), means[i])
          h1_residual = tf.transpose(h1_residual, perm=[1, 0, 2])
          h1_residual = tf.reshape(h1_residual, shape=h1_shape)
          h1 += h1_residual
    elif bottleneck_kind == "rounding":
      h1 = x
    else:
      raise ValueError("Unknown bottleneck kind.")

    return h1

def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
  return z, kl_loss, mu, log_sigma

def gumbel_sample(shape):
  """Sample from the Gumbel distribution, protect from overflows.

  Args:
    shape: Shape of Gumbel samples.

  Returns:
    Noise drawn from Gumbel distribution.
  """
  uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998)
  return -tf.log(-tf.log(uniform_samples))
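
# Illustrative sketch (assumption: TF 1.x): adding gumbel_sample noise to
# unnormalized log-probabilities and taking the argmax draws from the
# corresponding categorical distribution (the Gumbel-max trick), which the
# gumbel_softmax bottleneck below relaxes with a temperature-scaled softmax.
def _example_gumbel_max(logits):
  noisy_logits = logits + gumbel_sample(common_layers.shape_list(logits))
  return tf.argmax(noisy_logits, axis=-1)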

def gumbel_softmax(x,
                   z_size,
                   mode,
                   softmax_k=0,
                   temperature_warmup_steps=150000,
                   summary=True,
                   name=None):
  """Gumbel softmax discretization bottleneck.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    mode: tf.estimator.ModeKeys.
    softmax_k: If > 0 then do top-k softmax.
    temperature_warmup_steps: Number of steps it takes to decay temperature to
      0.
    summary: Whether to write summaries.
    name: Name for the bottleneck scope.

  Returns:
    Embedding function, discrete code, and loss.
  """
  with tf.variable_scope(name, default_name="gumbel_softmax"):
    m = tf.layers.dense(x, 2**z_size, name="mask")
    if softmax_k > 0:
      m, kl = top_k_softmax(m, softmax_k)
      return m, m, 1.0 - tf.reduce_mean(kl)
    logsm = tf.nn.log_softmax(m)

    # Gumbel-softmax sample.
    gumbel_samples = gumbel_sample(common_layers.shape_list(m))
    steps = temperature_warmup_steps
    gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
    temperature = 1.2 - common_layers.inverse_lin_decay(steps)

    # 10% of the time keep reasonably high temperature to keep learning.
    temperature = tf.cond(
        tf.less(tf.random_uniform([]), 0.9), lambda: temperature,
        lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
    s = tf.nn.softmax((logsm + gumbel_samples) / temperature)
    m = tf.nn.softmax(m)
    kl = -tf.reduce_max(logsm, axis=-1)

    if summary:
      tf.summary.histogram("max-log", tf.reshape(kl, [-1]))

    # Calculate the argmax and construct hot vectors.
    maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])
    maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))

    # Add losses that prevent too few being used.
    distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot
    d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)
    d_variance = tf.reduce_mean(
        tf.squared_difference(distrib, d_mean), axis=[0])
    d_dev = -tf.reduce_mean(d_variance)
    ret = s

    if mode != tf.estimator.ModeKeys.TRAIN:
      ret = tf.reshape(maxvhot, common_layers.shape_list(s))  # Just hot @eval.

    return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002

def discrete_bottleneck(inputs,
                        hidden_size,
                        z_size,
                        filter_size,
                        mode=None,
                        bottleneck_kind="dvq",
                        num_blocks=2,
                        num_residuals=1,
                        reshape_method="slice",
                        projection_tensors=None,
                        beta=0.25,
                        ema=True,
                        means=None,
                        ema_count=None,
                        ema_means=None,
                        epsilon=1e-5,
                        decay=0.999,
                        random_top_k=1,
                        soft_em=False,
                        num_samples=1,
                        softmax_k=0,
                        temperature_warmup_steps=150000,
                        do_hard_gumbel_softmax=False,
                        num_flows=0,
                        approximate_gs_entropy=False,
                        sum_over_latents=False,
                        discrete_mix=0.5,
                        noise_dev=1.,
                        startup_steps=50000,
                        summary=True,
                        name=None,
                        cond=True):
  """Discretization bottleneck.

  Args:
    inputs: Input to the bottleneck, a Tensor of shape [..., channels].
    hidden_size: Dimension of the dense output.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    filter_size: Filter size in the embedding function.
    mode: tf.estimator.ModeKeys.
    bottleneck_kind: Kind of discretization bottleneck. One of dense, dvq
      (decomposed vector quantization), gumbel-softmax, gumbel-softmax-dvq,
      semhash, or vae.
    num_blocks: Number of blocks. Used only if bottleneck_kind is DVQ.
    num_residuals: Number of residual units used to compute nearest neighbors.
      Used only if bottleneck_kind is DVQ.
    reshape_method: Method to reshape. Used only if bottleneck_kind is DVQ.
    projection_tensors: If the reshape method is project, then these are the
      tensors used to project.
    beta: Scale factor for codebook loss and EMA. Used only if bottleneck_kind
      is DVQ.
    ema: Whether to update embeddings using exponential moving averages. Used
      only if bottleneck_kind is DVQ.
    means: The embedding table. Used only if ema is True.
    ema_count: Table of counts for each embedding corresponding to how many
      examples in a batch it was the closest to. Used only if ema is True.
    ema_means: Exponentially averaged version of the embeddings. Used only if
      ema is True.
    epsilon: Small value to avoid dividing by zero in EMA update. Used only if
      ema is True.
    decay: Decay factor for the exponential moving average. Used only if ema
      is True.
    random_top_k: Noisy top-k. Used only if bottleneck_kind is DVQ.
    soft_em: Whether to use soft EM or hard EM. Used only if bottleneck_kind
      is DVQ.
    num_samples: Number of samples for soft EM. Used only if soft_em is True.
    softmax_k: If > 0 then do top-k softmax. Used only if bottleneck_kind is
      gumbel-softmax.
    temperature_warmup_steps: Number of steps it takes to decay temperature to
      0. Used only if bottleneck_kind is gumbel-softmax or gumbel-softmax-dvq.
    do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax samples.
      Used only if bottleneck_kind is gumbel-softmax-dvq.
    num_flows: Number of inverse autoregressive flows. Used only if
      bottleneck_kind is gumbel-softmax-dvq.
    approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density
      as a categorical distribution when calculating the sample entropy. Used
      only if bottleneck_kind is gumbel-softmax-dvq.
    sum_over_latents: Whether to sum over all non-batch dimensions before
      taking mean of entropy loss term. Used only if bottleneck kind is DVQ or
      gumbel-softmax-dvq.
    discrete_mix: Factor for mixing discrete and non-discrete input. Used only
      if bottleneck_kind is semhash.
    noise_dev: Noise stddev. Used only if bottleneck_kind is semhash.
    startup_steps: Number of steps after which latent predictor is trained.
      Used only if bottleneck_kind is semhash.
    summary: Whether to write summaries.
    name: Name for the bottleneck scope.
    cond: A tf.bool condition on whether to update the codebook.

  Returns:
    outputs_dense: Tensor of shape [..., output_dim]. The output dimension is
      hidden_size if bottleneck_kind is gumbel-softmax, DVQ; filter_size if
      bottleneck_kind is dense, semhash, vae. If bottleneck_kind is DVQ,
      outputs_dense represents the codebook (means) indexed by
      outputs_discrete.
    outputs_discrete: Tensor of shape [...]. Discrete codes, each an index in
      [0, 2**z_size). It uses the hot representation if soft_em is True.
    extra_loss: Scalar Tensor. Sum of codebook and commitment losses if
      bottleneck_kind is DVQ; else zero.
    embed_fn: Function embed with arguments partially filled in.
    neg_q_entropy: Scalar Tensor representing negative entropy of variational
      approximation (0 if it is deterministic).

  Raises:
    ValueError: If projection_tensors is None for reshape_method project, or
      ema_count or ema_means is None if ema is True, or unknown args.
  """
  if bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]:
    assert means is not None
    if hidden_size % num_blocks != 0:
      raise ValueError("num_blocks does not divide hidden size")

    if z_size % num_residuals != 0:
      raise ValueError("num_residuals does not divide embedding table size")

    z_size_per_residual = int(z_size / num_residuals)

    if z_size_per_residual % num_blocks != 0:
      raise ValueError("num_blocks does not divide embedding table size")

    block_v_size = 2**int(z_size_per_residual / num_blocks)

    if ema:
      if ema_count is None:
        raise ValueError("ema_count is None but ema is True")
      if ema_means is None:
        raise ValueError("ema_means is None but ema is True")
  else:
    block_v_size = None

  with tf.variable_scope(
      name, default_name="discrete_bottleneck", reuse=tf.AUTO_REUSE):
    embed_fn = partial(
        embed,
        hidden_size=hidden_size,
        z_size=z_size,
        filter_size=filter_size,
        bottleneck_kind=bottleneck_kind,
        soft_em=soft_em,
        num_blocks=num_blocks,
        num_residuals=num_residuals,
        block_v_size=block_v_size,
        means=means,
        name=name)

    if bottleneck_kind == "dense":
      # Note discrete output is continuous here.
      outputs_discrete = tf.layers.dense(inputs, z_size, name="vcc")
      outputs_dense = tf.layers.dense(
          outputs_discrete, filter_size, name="vch1")
      extra_loss = tf.constant(0.0)
      neg_q_entropy = tf.constant(0.0)
    elif bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]:
      inputs_3d = inputs
      if len(inputs.shape) == 4:
        inputs_3d = tf.squeeze(inputs, axis=2)

      if reshape_method == "slice":
        x_reshaped = slice_hidden(
            inputs_3d, hidden_size=hidden_size, num_blocks=num_blocks)
      elif reshape_method == "project":
        if projection_tensors is None:
          raise ValueError(
              "Projection tensors is None for reshape_method project")
        x_reshaped = project_hidden(
            inputs_3d,
            projection_tensors=projection_tensors,
            hidden_size=hidden_size,
            num_blocks=num_blocks)
      else:
        raise ValueError("Unknown reshape_method")

      x_res = tf.reshape(x_reshaped,
                         [-1] + common_layers.shape_list(x_reshaped)[2:])
      x_means_hot = []
      x_means = 0
      extra_loss = 0
      for i in range(num_residuals):
        x_means_hot_res, x_means_res, q_loss_res, e_loss_res, neg_q_entropy = (
            embedding_lookup(
                x_reshaped,
                means=means[i],
                num_blocks=num_blocks,
                block_v_size=block_v_size,
                bottleneck_kind=bottleneck_kind,
                random_top_k=random_top_k,
                soft_em=soft_em,
                num_samples=num_samples,
                temperature_warmup_steps=temperature_warmup_steps,
                do_hard_gumbel_softmax=do_hard_gumbel_softmax,
                num_flows=num_flows,
                approximate_gs_entropy=approximate_gs_entropy,
                sum_over_latents=sum_over_latents))
        # Update the EMA variables.
        if ema:
          tf.logging.info("Using EMA with beta = {}".format(beta))
          updated_ema_count_res = moving_averages.assign_moving_average(
              ema_count[i],
              tf.where(cond,
                       tf.reduce_sum(
                           tf.reshape(x_means_hot_res,
                                      shape=[-1, num_blocks, block_v_size]),
                           axis=0), ema_count[i]),
              decay,
              zero_debias=False)

          dw = tf.matmul(
              tf.transpose(x_means_hot_res, perm=[1, 2, 0]),
              tf.transpose(x_res, perm=[1, 0, 2]))

          updated_ema_means_res = moving_averages.assign_moving_average(
              ema_means[i], tf.where(cond, dw, ema_means[i]),
              decay,
              zero_debias=False)
          n = tf.reduce_sum(updated_ema_count_res, axis=-1, keep_dims=True)
          updated_ema_count_res = (
              (updated_ema_count_res + epsilon) /
              (n + 2**z_size * epsilon) * n)
          # pylint: disable=g-no-augmented-assignment
          updated_ema_means_res = updated_ema_means_res / tf.expand_dims(
              updated_ema_count_res, axis=-1)
          # pylint: enable=g-no-augmented-assignment

          with tf.control_dependencies([e_loss_res]):
            update_means_res = tf.assign(means[i],
                                         tf.where(cond,
                                                  updated_ema_means_res,
                                                  means[i]))
            with tf.control_dependencies([update_means_res]):
              extra_loss += beta * e_loss_res
        else:
          extra_loss += q_loss_res + beta * e_loss_res

        # Update the residuals.
        x_res -= x_means_res
        x_means += x_means_res
        x_means_hot.append(x_means_hot_res)

      # Get the discrete latent representation.
      x_means_hot = tf.stack(x_means_hot, axis=1)
      x_means_idx = tf.argmax(x_means_hot, axis=-1)

      # Get the binary representation.
      x_means_bits = int_to_bit(
          x_means_idx,
          num_bits=int(z_size / (num_residuals * num_blocks)),
          base=2)
      shape = common_layers.shape_list(x_means_bits)
      new_shape = shape[:-2]
      new_shape[-1] = z_size
      x_means_bits = tf.reshape(x_means_bits, shape=new_shape)
      outputs_discrete = bit_to_int(
          tf.to_int32(x_means_bits), num_bits=z_size, base=2)

      # Adjust shape of discrete outputs.
      inputs_shape = common_layers.shape_list(inputs)
      outputs_discrete = tf.reshape(outputs_discrete, inputs_shape[:-1])

      # If we're using soft EM then set discretes to the hot representation.
      if soft_em:
        outputs_discrete = x_means_hot
        outputs_discrete = tf.reshape(outputs_discrete,
                                      inputs_shape[:-1] + [block_v_size])

      # Reshape assuming hidden_size == inputs_shape[:-1].
      x_means = tf.reshape(x_means, inputs_shape)
      outputs_dense = inputs + tf.stop_gradient(x_means - inputs)
    elif bottleneck_kind == "gumbel-softmax":
      _, outputs_hot, extra_loss = gumbel_softmax(
          inputs,
          z_size=z_size,
          mode=mode,
          softmax_k=softmax_k,
          temperature_warmup_steps=temperature_warmup_steps,
          summary=summary,
          name=name)
      outputs_discrete = tf.argmax(outputs_hot, axis=-1)
      outputs_dense = tf.layers.dense(
          outputs_hot, hidden_size, name="dae_dense")
      neg_q_entropy = tf.constant(0.0)
    elif bottleneck_kind == "semhash":
      outputs_discrete = tf.layers.dense(inputs, z_size, name="vcc")
      y_clean = common_layers.saturating_sigmoid(outputs_discrete)
      if summary:
        tf.summary.histogram("y_clean", tf.reshape(y_clean, [-1]))

      if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
        noise = tf.truncated_normal(
            common_layers.shape_list(outputs_discrete),
            mean=0.0,
            stddev=noise_dev)
        y = common_layers.saturating_sigmoid(outputs_discrete + noise)
      else:
        y = y_clean
      d = tf.to_float(tf.less(0.5, y))
      y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)
      pd = common_layers.inverse_exp_decay(startup_steps * 2)
      pd *= discrete_mix
      pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0
      c = tf.where(
          tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd),
          y_discrete, y)
      outputs_dense_a = tf.layers.dense(c, filter_size, name="vch1a")
      outputs_dense_b = tf.layers.dense(1.0 - c, filter_size, name="vch1b")
      outputs_dense = outputs_dense_a + outputs_dense_b
      dx = tf.to_int32(tf.stop_gradient(d))
      outputs_discrete = bit_to_int(dx, z_size)
      extra_loss = tf.constant(0.0)
      neg_q_entropy = tf.constant(0.0)
    elif bottleneck_kind == "vae":
      outputs_discrete, extra_loss, _, _ = vae(inputs, z_size, name="vae")
      outputs_dense = tf.layers.dense(
          outputs_discrete, filter_size, name="vch1")
      neg_q_entropy = tf.constant(0.0)
    else:
      raise ValueError("Unknown discretization method.")

  return outputs_dense, outputs_discrete, extra_loss, embed_fn, neg_q_entropy
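
# Illustrative usage sketch (assumption: TF 1.x; the shapes and sizes below
# are just for the example). The "semhash" kind needs no external codebook or
# EMA tables, so it is the simplest way to exercise discrete_bottleneck end to
# end.
def _example_semhash_bottleneck(latents):
  # latents: float Tensor, e.g. of shape [batch, length, 1, hidden].
  dense, discrete, extra_loss, embed_fn, _ = discrete_bottleneck(
      inputs=latents,
      hidden_size=64,
      z_size=8,
      filter_size=128,
      mode=tf.estimator.ModeKeys.TRAIN,
      bottleneck_kind="semhash",
      ema=False)
  # dense feeds the decoder; discrete holds codes in [0, 2**8);
  # embed_fn(discrete) maps codes back to decoder inputs at inference time.
  return dense, discrete, extra_loss, embed_fn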

def predict_bits_with_lstm(prediction_source, state_size, total_num_bits,
                           target_bits=None, extra_inputs=None,
                           bits_at_once=8, temperature=1.0, dropout=0.1):
  """Predict a sequence of bits (a latent) with LSTM, both training and infer.

  Given a tensor on which the predictions are based (prediction_source), we use
  a single-layer LSTM with state of size state_size to predict total_num_bits,
  which we predict in groups of size bits_at_once. During training, we use
  target_bits as input to the LSTM (teacher forcing) and return the target_bits
  together with the prediction loss. During inference, we sample with the given
  temperature and return the predicted sequence and loss 0.

  Args:
    prediction_source: a Tensor of shape [batch_size, ...] used to create
      the initial state and the first input to the LSTM.
    state_size: python integer, the size of the LSTM state.
    total_num_bits: python integer, how many bits in total to predict.
    target_bits: a tensor of shape [batch_size, total_num_bits] used during
      training as the target to predict; each element should be -1 or 1.
    extra_inputs: a Tensor [batch_size, total_num_bits // bits_at_once, d]
      of additional inputs, passed as additional LSTM inputs.
    bits_at_once: python integer, how many bits to predict at once.
    temperature: python float, temperature used for sampling during inference.
    dropout: float, the amount of dropout to apply during training
      (0.1 default).

  Returns:
    a pair (bits, loss) with the predicted bit sequence, which is a Tensor of
    shape [batch_size, total_num_bits] with elements either -1 or 1, and a loss
    used to train the predictions against the provided target_bits.
  """
  with tf.variable_scope("predict_bits_with_lstm"):
    # Layers and cell state creation.
    lstm_cell = tf.nn.rnn_cell.LSTMCell(state_size)
    discrete_predict = tf.layers.Dense(2**bits_at_once,
                                       name="discrete_predict")
    discrete_embed = tf.layers.Dense(state_size, name="discrete_embed")
    batch_size = common_layers.shape_list(prediction_source)[0]
    layer_pred = tf.layers.flatten(prediction_source)
    first_lstm_input = tf.layers.dense(layer_pred, state_size, name="istate")
    c_state = tf.layers.dense(layer_pred, state_size, name="cstate")
    m_state = tf.layers.dense(layer_pred, state_size, name="mstate")
    state = (c_state, m_state)

    # Prediction mode if no targets are given.
    if target_bits is None:
      outputs = []
      lstm_input = first_lstm_input
      for i in range(total_num_bits // bits_at_once):
        if extra_inputs is not None:
          lstm_input = tf.concat([lstm_input, extra_inputs[:, i, :]], axis=1)
        output, state = lstm_cell(lstm_input, state)
        discrete_logits = discrete_predict(output)
        discrete_samples = common_layers.sample_with_temperature(
            discrete_logits, temperature)
        outputs.append(tf.expand_dims(discrete_samples, axis=1))
        lstm_input = discrete_embed(tf.one_hot(discrete_samples, 256))
      outputs = tf.concat(outputs, axis=1)
      outputs = int_to_bit(outputs, bits_at_once)
      outputs = tf.reshape(outputs, [batch_size, total_num_bits])
      return 2 * outputs - 1, 0.0

    # Training mode, calculating loss.
    assert total_num_bits % bits_at_once == 0
    target_bits = tf.reshape(tf.maximum(tf.stop_gradient(target_bits), 0), [
        batch_size, total_num_bits // bits_at_once, bits_at_once])
    target_ints = bit_to_int(target_bits, bits_at_once)
    tf.summary.histogram("target_integers", tf.reshape(target_ints, [-1]))
    target_hot = tf.one_hot(target_ints, 2**bits_at_once, axis=-1)
    target_embedded = discrete_embed(target_hot)
    target_embedded = tf.nn.dropout(target_embedded, 1.0 - dropout)
    teacher_input = tf.concat(
        [tf.expand_dims(first_lstm_input, axis=1), target_embedded], axis=1)
    outputs = []
    for i in range(total_num_bits // bits_at_once):
      lstm_input = teacher_input[:, i, :]
      if extra_inputs is not None:
        lstm_input = tf.concat([lstm_input, extra_inputs[:, i, :]], axis=1)
      output, state = lstm_cell(lstm_input, state)
      outputs.append(tf.expand_dims(output, axis=1))
    outputs = tf.concat(outputs, axis=1)
    outputs = tf.nn.dropout(outputs, 1.0 - dropout)
    d_int_pred = discrete_predict(outputs)
    pred_loss = tf.losses.sparse_softmax_cross_entropy(
        logits=d_int_pred, labels=target_ints)
    pred_loss = tf.reduce_mean(pred_loss)
    return d_int_pred, pred_loss
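
# Illustrative training-mode usage sketch (assumption: TF 1.x; the sizes are
# arbitrary). Teacher forcing is used when target_bits is provided; inference
# would call the function again with target_bits=None under a reusing variable
# scope so the same LSTM weights are sampled from.
def _example_predict_bits(latent_summary, known_bits):
  # latent_summary: [batch, d]; known_bits: [batch, 64] with values in {-1, 1}.
  logits, train_loss = predict_bits_with_lstm(
      latent_summary, state_size=256, total_num_bits=64,
      target_bits=known_bits)
  return logits, train_loss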

def get_vq_codebook(codebook_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    means = tf.get_variable(
        name="means",
        shape=[codebook_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[codebook_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          name="ema_means",
          initializer=means.initialized_value(),
          trainable=False)
  return means, ema_means, ema_count

def vq_nearest_neighbor(x, means,
                        soft_em=False, num_samples=10, temperature=None):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = common_layers.shape_list(means)[0]
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if soft_em:
    x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=common_layers.shape_list(means)[0])
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    if temperature is None:
      x_means_idx = tf.argmax(-dist, axis=-1)
    else:
      x_means_idx = tf.multinomial(-dist / temperature, 1)
      x_means_idx = tf.squeeze(x_means_idx, axis=-1)
    if (common_layers.should_generate_summaries() and
        not common_layers.is_xla_compiled()):
      tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1]))
    x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
  x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
  x_means = tf.matmul(x_means_hot_flat, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss, dist
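
# Quick sketch of the distance identity used above (assumption: TF 1.x):
# ||x - m||^2 = ||x||^2 + ||m||^2 - 2 <x, m>, computed for every pair of
# (row of x, codebook entry) with a single matmul instead of explicit
# broadcasting of the difference.
def _example_pairwise_sq_dist(x, means):
  x_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)      # [n, 1]
  m_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)  # [k, 1]
  return x_sq + tf.transpose(m_sq) - 2 * tf.matmul(x, means, transpose_b=True)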

def vq_discrete_bottleneck(x,
                           bottleneck_bits,
                           beta=0.25,
                           decay=0.999,
                           epsilon=1e-5,
                           soft_em=False,
                           num_samples=10):
  """Simple vector quantized discrete bottleneck."""
  bottleneck_size = 2**bottleneck_bits
  x_means_hot, e_loss, _ = vq_body(
      x,
      bottleneck_size,
      beta=beta,
      decay=decay,
      epsilon=epsilon,
      soft_em=soft_em,
      num_samples=num_samples)
  return x_means_hot, e_loss

def vq_body(x,
            codebook_size,
            beta=0.25,
            decay=0.999,
            epsilon=1e-5,
            soft_em=False,
            num_samples=10,
            temperature=None,
            do_update=True):
  """Discretize each x into one of codebook_size codes."""
  x_shape = common_layers.shape_list(x)
  hidden_size = x_shape[-1]
  means, ema_means, ema_count = get_vq_codebook(codebook_size, hidden_size)
  x = tf.reshape(x, [-1, hidden_size])
  x_means_hot, e_loss, distances = vq_nearest_neighbor(
      x, means, soft_em=soft_em, num_samples=num_samples,
      temperature=temperature)

  def loss_with_update():
    """Update the ema variables and return loss triggering the update."""
    updated_ema_count = moving_averages.assign_moving_average(
        ema_count,
        tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]),
                      axis=0),
        decay,
        zero_debias=False)

    dw = tf.matmul(x_means_hot, x, transpose_a=True)
    updated_ema_means = tf.identity(
        moving_averages.assign_moving_average(
            ema_means, dw, decay, zero_debias=False))
    n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
    updated_ema_count = (
        (updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n)
    updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
    with tf.control_dependencies([e_loss]):
      update_means = means.assign(updated_ema_means)
      with tf.control_dependencies([update_means]):
        return beta * e_loss

  # Loss, also do update if requested.
  if do_update:
    loss = loss_with_update()
  else:
    loss = tf.cond(do_update, loss_with_update, lambda: beta * e_loss)

  d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size])
  return d, loss, distances

def vq_loss(x,
            targets,
            codebook_size,
            beta=0.25,
            decay=0.999,
            epsilon=1e-5,
            soft_em=False,
            num_samples=10,
            temperature=None,
            do_update=True):
  """Compute the loss of large vocab tensors using a VQAE codebook.

  Args:
    x: Tensor of inputs to be quantized to nearest code
    targets: Tensor of target indices to target codes
    codebook_size: Size of quantization codebook
    beta: scalar float for moving averages
    decay: scalar float for moving averages
    epsilon: scalar float for moving averages
    soft_em: boolean, whether to apply a soft sampling procedure
    num_samples: if soft_em, number of samples to take
    temperature: temperature if we want to sample nearest neighbors or None
    do_update: whether to update the means; True by default, can be a Tensor

  Returns:
    discrete_x: one-hot Tensor indicating which codebook element is closest
      to x
    x_means: Tensor, on the forward pass: closest codebook element to x, on
      the backwards pass: soft convex-combination of codebook elements by
      proximity to x
    target_means: the codebook elements corresponding to the targets
    code_loss: loss driving x closer to its nearest codebook element
    targets_loss: cross-entropy loss driving x closer to code corresponding
      to target
  """
  x_shape = common_layers.shape_list(x)
  target_shape = common_layers.shape_list(targets)
  hidden_size = x_shape[-1]
  means, _, _ = get_vq_codebook(codebook_size, hidden_size)
  x = tf.reshape(x, [-1, hidden_size])
  targets = tf.reshape(targets, [-1])
  one_hot_targets = tf.one_hot(targets, codebook_size)
  target_means = tf.matmul(one_hot_targets, means)

  discrete_x, code_loss, distances = vq_body(
      x,
      codebook_size,
      beta=beta,
      decay=decay,
      epsilon=epsilon,
      soft_em=soft_em,
      num_samples=num_samples,
      temperature=temperature,
      do_update=do_update)

  logits = -distances
  targets_loss = tf.losses.sparse_softmax_cross_entropy(
      logits=logits, labels=targets)
  targets_loss = tf.reduce_mean(targets_loss)

  x_means = tf.matmul(discrete_x, means)
  x_means = x + tf.stop_gradient(x_means - x)

  discrete_x = tf.reshape(discrete_x, x_shape[:-1] + [codebook_size])
  target_means = tf.reshape(target_means, target_shape + [hidden_size])
  return discrete_x, x_means, target_means, code_loss, targets_loss

def vq_discrete_unbottleneck(x, hidden_size):
  """Simple undiscretization from vector quantized representation."""
  x_shape = common_layers.shape_list(x)
  x = tf.to_float(x)
  bottleneck_size = common_layers.shape_list(x)[-1]
  means, _, _ = get_vq_codebook(bottleneck_size, hidden_size)
  result = tf.matmul(tf.reshape(x, [-1, x_shape[-1]]), means)
  return tf.reshape(result, x_shape[:-1] + [hidden_size])
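
# Illustrative round-trip sketch (assumption: TF 1.x and a statically known
# hidden dimension; get_vq_codebook reuses the same "vq" variable scope, so
# the bottleneck and unbottleneck share one codebook).
def _example_vq_round_trip(x):
  # x: float Tensor of shape [batch, length, hidden_size].
  hidden_size = common_layers.shape_list(x)[-1]
  one_hot_codes, commit_loss = vq_discrete_bottleneck(x, bottleneck_bits=10)
  reconstructed = vq_discrete_unbottleneck(one_hot_codes, hidden_size)
  return reconstructed, commit_loss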

def gumbel_softmax_nearest_neighbor_dvq(x,
                                        means,
                                        block_v_size,
                                        hard=False,
                                        temperature_init=1.2,
                                        num_samples=1,
                                        temperature_warmup_steps=150000,
                                        summary=True,
                                        num_flows=0,
                                        approximate_gs_entropy=False,
                                        sum_over_latents=False):
  """Sample from Gumbel-Softmax and compute neighbors and losses.

  Args:
    x: A `float`-like `Tensor` of shape [batch_size, latent_dim, num_blocks,
      block_dim] containing the latent vectors to be compared to the codebook.
    means: Embedding table of shape [num_blocks, block_v_size, block_dim].
    block_v_size: Number of discrete codes per block.
    hard: Determines whether we take hard or soft Gumbel-Softmax samples
      (Default: False).
    temperature_init: Initial temperature used for Gumbel-Softmax samples,
      after which it decays to 0 (Default: 1.2).
    num_samples: Number of samples drawn for each latent (Default: 1).
    temperature_warmup_steps: Number of steps it takes to decay temperature to
      0 (Default: 150000).
    summary: When `True`, we save histogram summaries of the KL term
      (Default: True).
    num_flows: Number of inverse autoregressive flows with Gumbel-Softmax
      samples.
    approximate_gs_entropy: When `True`, we approximate Gumbel-Softmax density
      as categorical when calculating sample entropy (Default: False).
    sum_over_latents: Whether to sum over non-batch dimensions when
      calculating negative entropy loss.

  Returns:
    x_means_assignments: A `float`-like `Tensor` containing the codebook
      assignments, averaged over samples, with shape
      [batch_size * latent_dim, num_blocks, block_v_size].
    neg_q_entropy: The negative entropy of the variational distribution,
      averaged over samples.
  """
  batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x)

  # Combine latent_dim and batch_size for computing distances.
  x = tf.reshape(x, [-1, num_blocks, block_dim])

  # Compute distances using (x - means)**2 = x**2 + means**2 - 2*x*means.
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  means_norm_sq = tf.transpose(means_norm_sq, perm=[2, 0, 1])
  scalar_prod = tf.matmul(
      tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
  scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
  dist = x_norm_sq + means_norm_sq - 2 * scalar_prod

  # IAF requires latents to have their own dimension, so reshape dist from
  # [batch_size * latent_dim, num_blocks, block_v_size] to
  # [batch_size * num_blocks, latent_dim, block_v_size].
  dist = tf.reshape(dist, [batch_size, latent_dim, num_blocks, -1])
  dist = tf.reshape(
      tf.transpose(dist, perm=[0, 2, 1, 3]), [-1, latent_dim, block_v_size])
  log_class_probs = tf.nn.log_softmax(-dist)

  sample_shape = [num_samples] + common_layers.shape_list(dist)
  gumbel_samples = gumbel_sample(sample_shape)

  # Temperature decays linearly.
  temperature = temperature_init - common_layers.inverse_lin_decay(
      temperature_warmup_steps)

  # 10% of the time keep reasonably high temperature to keep learning.
  temperature = tf.cond(
      tf.less(tf.random_uniform([]), 0.9), lambda: temperature,
      lambda: tf.random_uniform([], minval=0.5, maxval=1.0))

  gumbel_softmax_samples = tf.nn.softmax(
      (tf.expand_dims(log_class_probs, 0) + gumbel_samples) / temperature)
  q_samples = tf.clip_by_value(gumbel_softmax_samples, 1e-6, 1 - 1e-6)

  if approximate_gs_entropy:
    q_dist = tfp.distributions.Multinomial(total_count=1.0, logits=-dist)
  else:
    q_dist = tfp.distributions.RelaxedOneHotCategorical(
        temperature, logits=-dist)

  # Take mean over samples to approximate entropy.
  neg_q_entropy = tf.reduce_mean(q_dist.log_prob(q_samples), 0)
  if summary:
    tf.summary.histogram("neg_q_entropy", tf.reshape(neg_q_entropy, [-1]))
  if sum_over_latents:
    neg_q_entropy = tf.reshape(neg_q_entropy,
                               [batch_size, num_blocks, latent_dim])
    neg_q_entropy = tf.reduce_sum(neg_q_entropy, [1, 2])
  neg_q_entropy = tf.reduce_mean(neg_q_entropy)

  if num_flows > 0:
    hparams = iaf_hparams(hidden_size=512, filter_size=4096)
    q_samples = tf.reshape(q_samples, [-1, latent_dim, block_v_size])
    for flow in range(num_flows):
      shifted_samples = tf.pad(q_samples, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]

      # Project samples from [batch_size, latent_size, block_v_size] to
      # [batch_size, latent_size, hidden_size].
      shifted_samples = common_layers.dense(shifted_samples,
                                            hparams.hidden_size)
      # TODO(vafa): Include masking as a flag.
      mask = True
      if mask:
        attention_type = cia.AttentionType.LOCAL_1D
      else:
        attention_type = cia.AttentionType.GLOBAL
      ffn_output = cia.transformer_decoder_layers(
          inputs=shifted_samples,
          encoder_output=None,
          num_layers=6,
          hparams=hparams,
          attention_type=attention_type,
          name="transformer_" + str(flow))

      # Project samples back to [batch_size, latent_size, block_v_size].
      ffn_output = common_layers.dense(ffn_output, block_v_size)
      log_pi = tf.nn.log_softmax(ffn_output)

      # Flow 1: Adding log_pi to q_samples and dividing by the temperature.
      # Note that we drop the last dimension of q_samples for centered-softmax,
      # which we can do without recalculating probabilities because the last
      # dimension of log_pi and q_samples are deterministic given the others.
      # Flow 2: Centered-softmax.
      chained_bijectors = tfp.bijectors.Chain([
          tfp.bijectors.SoftmaxCentered(),
          tfp.bijectors.Affine(
              shift=log_pi[:, :, :-1],
              scale_identity_multiplier=1. / temperature)
      ])
      q_samples = chained_bijectors.forward(q_samples[:, :, :-1])
      log_det = chained_bijectors.inverse_log_det_jacobian(
          q_samples, event_ndims=1)
      log_det = tf.reshape(log_det,
                           [num_samples, batch_size, num_blocks, latent_dim])
      if sum_over_latents:
        log_det = tf.reduce_sum(log_det, axis=[2, 3])
      neg_q_entropy += tf.reduce_mean(log_det)

    q_samples = tf.reshape(
        q_samples,
        [num_samples, batch_size * num_blocks, latent_dim, block_v_size])

  if hard:
    x_means_idx = tf.argmax(q_samples, -1)

    # Take average of one-hot vectors over samples.
    x_means_hot = tf.reduce_mean(tf.one_hot(x_means_idx, block_v_size), 0)
    x_means_assignments = (
        tf.reduce_mean(q_samples, 0) +
        tf.stop_gradient(x_means_hot - tf.reduce_mean(q_samples, 0)))
  else:
    x_means_assignments = tf.reduce_mean(gumbel_softmax_samples, 0)

  # Reshape assignments to [batch_size * latent_dim, num_blocks,
  # block_v_size]. We have to transpose between reshapes to make sure the
  # dimensions have the correct interpretation.
  x_means_assignments = tf.reshape(
      x_means_assignments,
      [batch_size, num_blocks, latent_dim, block_v_size])
  x_means_assignments = tf.transpose(x_means_assignments, [0, 2, 1, 3])
  x_means_assignments = tf.reshape(
      x_means_assignments,
      [batch_size * latent_dim, num_blocks, block_v_size])

  return x_means_assignments, neg_q_entropy

def gumbel_softmax_discrete_bottleneck(x,
                                       bottleneck_bits,
                                       beta=0.25,
                                       decay=0.999,
                                       epsilon=1e-5,
                                       temperature_warmup_steps=150000,
                                       hard=False,
                                       summary=True):
  """VQ-VAE using Gumbel-Softmax.

  Different from the `gumbel_softmax()` function as this function calculates
  the KL by using the discrete entropy instead of taking the argmax, and it
  also uses an exponential moving average to update the codebook while the
  `gumbel_softmax()` function includes no codebook update.

  Args:
    x: A `float`-like `Tensor` containing the latent vectors to be compared to
      the codebook, whose squared difference is used as the Gumbel-Softmax
      logits.
    bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`.
    beta: Beta factor for commitment loss (Default: 0.25).
    decay: Decay factor for exponential moving average (Default: 0.999).
    epsilon: Small value to avoid dividing by zero in EMA update
      (Default: 1e-5).
    temperature_warmup_steps: Number of steps it takes to decay temperature to
      0 (Default: 150000).
    hard: When `True`, we use hard Gumbel-Softmax samples and force discrete
      latents by taking the argmax. When `False`, we use soft samples, which
      we treat as codebook weights (Default: False).
    summary: When `True`, we save histogram summaries of the KL term
      (Default: True).

  Returns:
    x_means_assignments: A `float`-like `Tensor` containing the codebook
      assignments. When `hard == True`, this is one-hot, containing the
      arg-max of the Gumbel-Softmax samples (and we use the straight-through
      gradient). Otherwise, it contains the Gumbel-Softmax samples exactly,
      which are values from the `(K-1)`-simplex where `K` is the bottleneck
      size.
    loss: The loss, which is the sum of the KL between the Gumbel-Softmax and
      the uniform prior and the commitment loss multiplied by the beta factor.
      We approximate the KL by using the entropy of a categorical distribution
      instead of the Gumbel Softmax.
  """
  bottleneck_size = 2**bottleneck_bits
  x_shape = common_layers.shape_list(x)
  hidden_size = x_shape[-1]
  means, ema_means, ema_count = get_vq_codebook(bottleneck_size, hidden_size)
  x = tf.reshape(x, [-1, hidden_size])

  bottleneck_size = common_layers.shape_list(means)[0]
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod

  class_probs = tf.nn.softmax(dist)
  log_class_probs = tf.nn.log_softmax(dist)
  gumbel_samples = gumbel_sample(common_layers.shape_list(dist))
  steps = temperature_warmup_steps
  gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
  temperature = 1.2 - common_layers.inverse_lin_decay(steps)

  # 10% of the time keep reasonably high temperature to keep learning.
  temperature = tf.cond(
      tf.less(tf.random_uniform([]), 0.9), lambda: temperature,
      lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
  gumbel_softmax_samples = tf.nn.softmax(
      (log_class_probs + gumbel_samples) / temperature)

  # Calculate KL between q and a uniform prior.
  kl = tf.reduce_sum(
      class_probs * (log_class_probs - tf.log(1.0 / bottleneck_size)), -1)
  if summary:
    tf.summary.histogram("KL", tf.reshape(kl, [-1]))

  # Straight-through gradient estimation when we're using hard assignments.
  if hard:
    x_means_idx = tf.reshape(tf.argmax(gumbel_softmax_samples, axis=-1), [-1])
    x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
    x_means_assignments = gumbel_softmax_samples + tf.stop_gradient(
        x_means_hot - gumbel_softmax_samples)
  else:
    x_means_assignments = gumbel_softmax_samples
  x_means_assignments_flat = tf.reshape(x_means_assignments,
                                        [-1, bottleneck_size])
  x_means = tf.matmul(x_means_assignments_flat, means)
  commitment_loss = tf.reduce_mean(
      tf.squared_difference(x, tf.stop_gradient(x_means)))

  # Update the ema variables.
  updated_ema_count = moving_averages.assign_moving_average(
      ema_count,
      tf.reduce_sum(
          tf.reshape(x_means_assignments, shape=[-1, bottleneck_size]),
          axis=0),
      decay,
      zero_debias=False)

  dw = tf.matmul(x_means_assignments, x, transpose_a=True)
  updated_ema_means = tf.identity(
      moving_averages.assign_moving_average(
          ema_means, dw, decay, zero_debias=False))
  n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
  updated_ema_count = (
      (updated_ema_count + epsilon) / (n + bottleneck_size * epsilon) * n)
  updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
  with tf.control_dependencies([commitment_loss]):
    update_means = means.assign(updated_ema_means)
    with tf.control_dependencies([update_means]):
      loss = beta * commitment_loss

  # Add KL loss.
  loss += tf.reduce_mean(kl)

  x_means_assignments = tf.reshape(x_means_assignments,
                                   x_shape[:-1] + [bottleneck_size])
  return x_means_assignments, loss
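
# Illustrative usage sketch (assumption: TF 1.x, a training-time graph where
# the EMA codebook updates run as side effects of evaluating the loss, and a
# statically known hidden size).
def _example_gumbel_vq(latents):
  # latents: float Tensor of shape [batch, length, hidden_size].
  assignments, loss = gumbel_softmax_discrete_bottleneck(
      latents, bottleneck_bits=12, hard=True)
  decoded = vq_discrete_unbottleneck(
      assignments, common_layers.shape_list(latents)[-1])
  return decoded, loss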

def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0
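
# Sketch of the straight-through sign trick used above (assumption: TF 1.x):
# the forward value is the hard sign in {-1, 1}, while the gradient flows as
# if the op were the identity on the continuous pre-activation.
def _example_straight_through_sign(x):
  hard = 2.0 * tf.to_float(tf.less(0.0, x)) - 1.0
  return x + tf.stop_gradient(hard - x)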

def tanh_discrete_unbottleneck(x, hidden_size):
  """Simple un-discretization from tanh."""
  x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck")
  return x

def isemhash_bottleneck(x,
                        bottleneck_bits,
                        bottleneck_noise,
                        discretize_warmup_steps,
                        mode,
                        isemhash_noise_dev=0.5,
                        isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(
          d,
          2.0 * y - 1.0,
          discretize_warmup_steps,
          mode == tf.estimator.ModeKeys.TRAIN,
          max_prob=isemhash_mix_prob)
    return d, 0.0

def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0):
  """Improved semantic hashing un-bottleneck."""
  filter_size = int(hidden_size * isemhash_filter_size_multiplier)
  x = 0.5 * (x - 1.0)  # Move from [-1, 1] to [0, 1].
  with tf.variable_scope("isemhash_unbottleneck"):
    h1a = tf.layers.dense(x, filter_size, name="hidden1a")
    h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b")
    h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2")
    return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final")

def parametrized_bottleneck(x, hparams):
  """Meta-function calling all the above bottlenecks with hparams."""
  if hparams.bottleneck_kind == "tanh_discrete":
    d, _ = tanh_discrete_bottleneck(
        x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5,
        hparams.discretize_warmup_steps, hparams.mode)
    return d, 0.0
  if hparams.bottleneck_kind == "isemhash":
    return isemhash_bottleneck(
        x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5,
        hparams.discretize_warmup_steps, hparams.mode,
        hparams.isemhash_noise_dev, hparams.isemhash_mix_prob)
  if hparams.bottleneck_kind == "vq":
    return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta,
                                  hparams.vq_decay, hparams.vq_epsilon)
  if hparams.bottleneck_kind == "em":
    return vq_discrete_bottleneck(
        x,
        hparams.bottleneck_bits,
        hparams.vq_beta,
        hparams.vq_decay,
        hparams.vq_epsilon,
        soft_em=True,
        num_samples=hparams.vq_num_samples)
  if hparams.bottleneck_kind == "gumbel_softmax":
    return gumbel_softmax_discrete_bottleneck(
        x,
        hparams.bottleneck_bits,
        hparams.vq_beta,
        hparams.vq_decay,
        hparams.vq_epsilon,
        hparams.temperature_warmup_steps,
        hard=False,
        summary=True)

  raise ValueError(
      "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)

def parametrized_unbottleneck(x, hidden_size, hparams):
  """Meta-function calling all the above un-bottlenecks with hparams."""
  if hparams.bottleneck_kind == "tanh_discrete":
    return tanh_discrete_unbottleneck(x, hidden_size)
  if hparams.bottleneck_kind == "isemhash":
    return isemhash_unbottleneck(x, hidden_size,
                                 hparams.isemhash_filter_size_multiplier)
  if hparams.bottleneck_kind in ["vq", "em", "gumbel_softmax"]:
    return vq_discrete_unbottleneck(x, hidden_size)
  raise ValueError(
      "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
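
# Illustrative round-trip sketch (assumptions: TF 1.x with tf.contrib
# available, and an hparams object carrying only the fields the
# "tanh_discrete" path reads; the values below are arbitrary examples).
def _example_parametrized_round_trip(x, mode):
  hparams = tf.contrib.training.HParams(
      bottleneck_kind="tanh_discrete",
      bottleneck_bits=16,
      bottleneck_noise=0.2,
      discretize_warmup_steps=16000,
      mode=mode)
  d, extra_loss = parametrized_bottleneck(x, hparams)
  x_back = parametrized_unbottleneck(
      d, common_layers.shape_list(x)[-1], hparams)
  return x_back, extra_loss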

def iaf_hparams(hidden_size=512, filter_size=4096):
  """Create hyperparameters for inverse autoregressive flows.

  Args:
    hidden_size: Width of attention layers and neural network output layer.
    filter_size: Hidden layer width for neural network.

  Returns:
    hparams: Hyperparameters with basic presets for inverse autoregressive
      flows.
  """
  hparams = common_hparams.basic_params1()

  # Attention hyperparameters.
  hparams.hidden_size = hidden_size
  hparams.add_hparam("attention_key_channels", None)
  hparams.add_hparam("attention_value_channels", None)
  hparams.add_hparam("num_heads", 4)
  hparams.add_hparam("attention_dropout", 0.1)
  hparams.add_hparam("shared_rel", False)
  hparams.add_hparam("block_width", 1)
  hparams.add_hparam("block_length", 1)
  hparams.add_hparam("q_filter_width", 1)
  hparams.add_hparam("kv_filter_width", 1)

  # Preprocessing and postprocessing hyperparameters.
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.norm_type = "layer"
  hparams.norm_epsilon = 1e-06
  hparams.layer_prepostprocess_dropout_broadcast_dims = ""
  hparams.layer_postprocess_sequence = "da"

  # Feedforward neural network hyperparameters.
  hparams.add_hparam("filter_size", filter_size)
  hparams.add_hparam("ffn_layer", "conv_hidden_relu")
  hparams.add_hparam("relu_dropout", 0.1)
  return hparams
Returns a set containing the original vocabulary. This is important for comparing with published results. Args: tmp_dir: directory containing dataset. Returns: a set of strings def _original_vocab(tmp_dir): """Returns a set containing the original vocabulary. This is important for comparing with published results. Args: tmp_dir: directory containing dataset. Returns: a set of strings """ vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/" "vocab-2016-09-10.txt") vocab_filename = os.path.basename(vocab_url + ".en") vocab_filepath = os.path.join(tmp_dir, vocab_filename) if not os.path.exists(vocab_filepath): generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url) return set([ text_encoder.native_to_unicode(l.strip()) for l in tf.gfile.Open(vocab_filepath) ])
Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words. def _replace_oov(original_vocab, line): """Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words. """ return u" ".join( [word if word in original_vocab else u"UNK" for word in line.split()])
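A quick illustration of the OOV replacement with a toy vocabulary (made-up values):
toy_vocab = {u"the", u"cat", u"sat"}
print(_replace_oov(toy_vocab, u"the cat sat on the mat"))
# -> the cat sat UNK the UNK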
Download and unpack the corpus. Args: tmp_dir: directory containing dataset. def _maybe_download_corpus(tmp_dir): """Download and unpack the corpus. Args: tmp_dir: directory containing dataset. """ corpus_url = ("http://www.statmt.org/lm-benchmark/" "1-billion-word-language-modeling-benchmark-r13output.tar.gz") corpus_filename = os.path.basename(corpus_url) corpus_filepath = os.path.join(tmp_dir, corpus_filename) if not os.path.exists(corpus_filepath): generator_utils.maybe_download(tmp_dir, corpus_filename, corpus_url) with tarfile.open(corpus_filepath, "r:gz") as corpus_tar: corpus_tar.extractall(tmp_dir)
Loss function. def lossfn(real_input, fake_input, compress, hparams, lsgan, name): """Loss function.""" eps = 1e-12 with tf.variable_scope(name): d1 = discriminator(real_input, compress, hparams, "discriminator") d2 = discriminator(fake_input, compress, hparams, "discriminator", reuse=True) if lsgan: dloss = tf.reduce_mean( tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2)) gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9)) loss = (dloss + gloss)/2 else: # cross_entropy dloss = -tf.reduce_mean( tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2)) gloss = -tf.reduce_mean(tf.log(d2 + eps)) loss = (dloss + gloss)/2 return loss
Cycle GAN, main step used for training. def cycle_gan_internal(inputs, targets, _, hparams): """Cycle GAN, main step used for training.""" with tf.variable_scope("cycle_gan"): # Embed inputs and targets. inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) inputs = common_layers.embedding( inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, "embed", reuse=True) x, _ = split_on_batch(inputs) _, y = split_on_batch(targets) # Y --> X y_fake = generator(y, hparams, "Fy", reuse=False) y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") # X --> Y x_fake = generator(x, hparams, "Gx", reuse=False) x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") # Cycle-Consistency y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( tf.abs(x_fake_ - x)) y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( tf.abs(y_fake_ - y)) cycloss = x_to_x_loss + y_to_y_loss sample_generated = generator(inputs, hparams, "Gx", reuse=True) sample_generated = tf.layers.dense( sample_generated, hparams.vocab_size, name="softmax", reuse=None) sample_generated = tf.stop_gradient( tf.expand_dims(sample_generated, axis=2)) losses = {"cycloss": cycloss, "y_to_x_loss": y_to_x_loss, "x_to_y_loss": x_to_y_loss} return sample_generated, losses
Set of hyperparameters. def cycle_gan_small(): """Set of hyperparameters.""" hparams = transformer_vae.transformer_ae_small() hparams.batch_size = 2048 hparams.bottom = { "inputs": modalities.identity_bottom, "targets": modalities.identity_bottom, } hparams.top = { "targets": modalities.identity_top, } hparams.weight_decay = 3.0 hparams.learning_rate = 0.05 hparams.kl_warmup_steps = 5000 hparams.learning_rate_warmup_steps = 3000 hparams.add_hparam("vocab_size", 66) # Vocabulary size, need to set here. hparams.add_hparam("cycle_loss_multiplier1", 10.0) hparams.add_hparam("cycle_loss_multiplier2", 10.0) return hparams
Hparams for decoding. def decode_hparams(overrides=""): """Hparams for decoding.""" hparams = decoding.decode_hparams() # Number of interpolations between [0.0, 1.0]. hparams.add_hparam("num_interp", 11) # Which level(s) to interpolate. hparams.add_hparam("level_interp", [0, 1, 2]) # "all" or "ranked": interpolate all channels or only ranked channels. hparams.add_hparam("channel_interp", "all") # Interpolate channels ranked according to squared L2 norm. hparams.add_hparam("rank_interp", 1) # Whether or not to save frames as summaries. hparams.add_hparam("save_frames", True) hparams.parse(overrides) return hparams
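Overrides are the usual comma-separated HParams string; a small illustrative call (values are arbitrary):
decode_hp = decode_hparams("num_interp=5,channel_interp=ranked,rank_interp=2")
# Overrides the defaults added above before decoding.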
Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in [-0.5, 0.5]. def preprocess_frame(frame): """Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in [-0.5, 0.5]. """ # Normalize from [0, 255] -> [0.0, 1.0] -> [-0.5, 0.5]. frame = common_layers.convert_rgb_to_real(frame) frame = frame - 0.5 frame, _ = glow_ops.uniform_binning_correction(frame) return frame
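A numpy sketch of the intended value range; the binning correction is assumed here to add uniform dequantization noise with bin width 1/256 (an assumption, not the exact library behavior):
import numpy as np

frame = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float32)
frame = frame / 255.0 - 0.5  # [0, 255] -> [-0.5, 0.5]
frame = frame + np.random.uniform(0.0, 1.0 / 256, size=frame.shape)  # assumed noise width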
Encode frames to latents. def frame_to_latents(frame, hparams): """Encode frames to latents.""" # Preprocess frame = preprocess_frame(frame) # Encode [X_t] to [z^1_t, z^2_t .. z^l_t] glow_vals = glow_ops.encoder_decoder( "codec", frame, hparams, eps=None, reverse=False) z_top, _, level_eps, _, _ = glow_vals return z_top, level_eps
Decodes latents to frames. def latents_to_frames(z_top_interp, level_eps_interp, hparams): """Decodes latents to frames.""" # Decode [z^1_t, z^2_t .. z^l_t] to [X_t] images, _, _, _ = glow_ops.encoder_decoder( "codec", z_top_interp, hparams, eps=level_eps_interp, reverse=True) images = glow_ops.postprocess(images) return images
Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C) def interpolate(features, hparams, decode_hp): """Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C) """ inputs, targets = features["inputs"], features["targets"] inputs = tf.unstack(inputs, axis=1) targets = tf.unstack(targets, axis=1) coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp) # (X_1, X_t) -> (z_1, z_t) first_frame, last_frame = inputs[0], targets[-1] first_top_z, first_level_eps = frame_to_latents(first_frame, hparams) last_top_z, last_level_eps = frame_to_latents(last_frame, hparams) # Interpolate latents at all levels. first_lats = first_level_eps + [first_top_z] last_lats = last_level_eps + [last_top_z] interp_lats = [] lat_iterator = enumerate(zip(first_lats, last_lats)) for level_ind, (first_lat, last_lat) in lat_iterator: if level_ind in decode_hp.level_interp: if decode_hp.channel_interp == "all": interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs) else: interp_lat = glow_ops.linear_interpolate_rank( first_lat, last_lat, coeffs, decode_hp.rank_interp) else: interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1]) interp_lats.append(interp_lat) level_eps_interp = interp_lats[:hparams.n_levels-1] z_top_interp = interp_lats[-1] images = latents_to_frames(z_top_interp, level_eps_interp, hparams) return images, first_frame, last_frame
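The per-level interpolation is a convex combination of the two latents at each coefficient; a small numpy sketch of what glow_ops.linear_interpolate is assumed to compute:
import numpy as np

first_lat = np.zeros([1, 4, 4, 8])
last_lat = np.ones([1, 4, 4, 8])
coeffs = np.linspace(0.0, 1.0, 11)
interp = np.concatenate(
    [(1.0 - c) * first_lat + c * last_lat for c in coeffs], axis=0)
print(interp.shape)  # (11, 4, 4, 8)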
Get nested summaries_log_dir based on decode_hp. def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp.""" child_dir = decode_hp.summaries_log_dir level_dir = "".join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == "all": rank_dir = "all" else: rank_dir = "rank_%d" % decode_hp.rank_interp child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += "_{}".format(dataset_split) return os.path.join(output_dir, child_dir)
Converts interpolated frames into tf summaries. The summaries consist of: 1. Image summary corresponding to the first frame. 2. Image summary corresponding to the last frame. 3. The interpolated frames as a gif summary. Args: sample_ind: int interpolations: Numpy array, shape=(num_interp, H, W, 3) first_frame: Numpy array, shape=(H, W, C) last_frame: Numpy array, shape=(H, W, C) hparams: HParams, train hparams decode_hp: HParams, decode hparams Returns: summaries: list of tf Summary Values. def interpolations_to_summary(sample_ind, interpolations, first_frame, last_frame, hparams, decode_hp): """Converts interpolated frames into tf summaries. The summaries consist of: 1. Image summary corresponding to the first frame. 2. Image summary corresponding to the last frame. 3. The interpolated frames as a gif summary. Args: sample_ind: int interpolations: Numpy array, shape=(num_interp, H, W, 3) first_frame: Numpy array, shape=(H, W, C) last_frame: Numpy array, shape=(H, W, C) hparams: HParams, train hparams decode_hp: HParams, decode hparams Returns: summaries: list of tf Summary Values. """ parent_tag = "sample_%d" % sample_ind frame_shape = hparams.problem.frame_shape interp_shape = [hparams.batch_size, decode_hp.num_interp] + frame_shape interpolations = np.reshape(interpolations, interp_shape) interp_tag = "%s/interp/%s" % (parent_tag, decode_hp.channel_interp) if decode_hp.channel_interp == "ranked": interp_tag = "%s/rank_%d" % (interp_tag, decode_hp.rank_interp) summaries, _ = common_video.py_gif_summary( interp_tag, interpolations, return_summary_value=True, max_outputs=decode_hp.max_display_outputs, fps=decode_hp.frames_per_second) if decode_hp.save_frames: first_frame_summ = image_utils.image_to_tf_summary_value( first_frame, "%s/first" % parent_tag) last_frame_summ = image_utils.image_to_tf_summary_value( last_frame, "%s/last" % parent_tag) summaries.append(first_frame_summ) summaries.append(last_frame_summ) return summaries
EPVA hparams. def next_frame_epva(): """EPVA hparams.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 4 hparams.bottom = { "inputs": modalities.video_raw_bottom, "targets": modalities.video_raw_targets_bottom, } hparams.loss = { "targets": modalities.video_l2_raw_loss, } hparams.top = { "targets": modalities.video_raw_top, } hparams.learning_rate_schedule = "constant" hparams.learning_rate_constant = 1e-05 hparams.batch_size = 2 hparams.clip_grad_norm = 0.01 # TODO(msaffar): disentangle EPVA from SV2P hparams.add_hparam("reward_prediction", False) hparams.add_hparam("clip_pixel_values", True) hparams.add_hparam("context_frames", 5) hparams.add_hparam("enc_learning_rate", 1e-5) hparams.add_hparam("enc_pred_loss_scale", 0.1) hparams.add_hparam("enc_pred_loss_scale_delay", 6e5) hparams.add_hparam("enc_size", 64) hparams.add_hparam("enc_keep_prob", .65) hparams.add_hparam("enc_pred_use_l1_loss", False) hparams.add_hparam("enc_pred_use_l2norm", False) hparams.add_hparam("van_learning_rate", 3e-5) hparams.add_hparam("van_keep_prob", .9) hparams.add_hparam("sequence_length", 64) hparams.add_hparam("skip_num", 2) hparams.add_hparam("pred_noise_std", 0) hparams.add_hparam("lstm_state_noise_stddev", 0) return hparams
Create slot variables for Adam with accumulated gradients. def _create_slots(self, var_list): """Create slot variables for Adam with accumulated gradients.""" super(MultistepAdamOptimizer, self)._create_slots(var_list) first_var = min(var_list, key=lambda x: x.name) self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1, name="iter", colocate_with=first_var) for v in var_list: self._zeros_slot(v, "grad_acc", self._name)
Apply conditionally if counter is zero. def _apply_cond(self, apply_fn, grad, var, *args, **kwargs): """Apply conditionally if counter is zero.""" grad_acc = self.get_slot(var, "grad_acc") def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs): total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype) adam_op = apply_fn(total_grad, var, *args, **kwargs) with tf.control_dependencies([adam_op]): grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc), use_locking=self._use_locking) return tf.group(adam_op, grad_acc_to_zero_op) def accumulate_gradient(grad_acc, grad): assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking) return tf.group(assign_op) # Strip return value return tf.cond( tf.equal(self._get_iter_variable(), 0), lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs), lambda: accumulate_gradient(grad_acc, grad))
Updates beta_power variables every n batches and increments the counter. def _finish(self, update_ops, name_scope): """Updates beta_power variables every n batches and increments the counter.""" iter_ = self._get_iter_variable() beta1_power, beta2_power = self._get_beta_accumulators() with tf.control_dependencies(update_ops): with tf.colocate_with(iter_): def update_beta_op(): update_beta1 = beta1_power.assign( beta1_power * self._beta1_t, use_locking=self._use_locking) update_beta2 = beta2_power.assign( beta2_power * self._beta2_t, use_locking=self._use_locking) return tf.group(update_beta1, update_beta2) maybe_update_beta = tf.cond( tf.equal(iter_, 0), update_beta_op, tf.no_op) with tf.control_dependencies([maybe_update_beta]): update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t), use_locking=self._use_locking) return tf.group( *update_ops + [update_iter, maybe_update_beta], name=name_scope)
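Taken together, _create_slots, _apply_cond and _finish implement n-step gradient accumulation: the counter starts at 1 when n > 1, so the first n - 1 calls only add to grad_acc, and every n-th call applies the averaged gradient and clears the accumulator. A plain-Python sketch of that schedule (illustrative only, no TensorFlow):
def multistep_updates(grads, n):
  """Return the averaged gradients that would actually be applied."""
  grad_acc, counter, applied = 0.0, 0 if n == 1 else 1, []
  for g in grads:
    if counter == 0:
      applied.append((grad_acc + g) / n)  # apply averaged gradient, reset acc
      grad_acc = 0.0
    else:
      grad_acc += g                       # only accumulate on this step
    counter = (counter + 1) % n           # mirrors the iter update in _finish
  return applied

print(multistep_updates([1.0, 2.0, 3.0, 4.0], n=2))  # [1.5, 3.5]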
A stack of transformer layers. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor def transformer_revnet_encoder(encoder_input, encoder_self_attention_bias, hparams, name="encoder"): """A stack of transformer layers. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor """ def f(x, side_input): """f(x) for reversible layer, self-attention layer.""" encoder_self_attention_bias = side_input[0] old_hid_size = hparams.hidden_size hparams.hidden_size = old_hid_size // 2 with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) y = common_layers.layer_postprocess(x, y, hparams) hparams.hidden_size = old_hid_size return y def g(x): """g(x) for reversible layer, feed-forward layer.""" old_hid_size = hparams.hidden_size hparams.hidden_size = old_hid_size // 2 with tf.variable_scope("ffn"): y = transformer.transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams) y = common_layers.layer_postprocess(x, y, hparams) hparams.hidden_size = old_hid_size return y x1, x2 = tf.split(encoder_input, 2, axis=-1) with tf.variable_scope(name): y1, y2 = tf.contrib.layers.rev_block( x1, x2, f, g, num_layers=hparams.num_hidden_layers, f_side_input=[encoder_self_attention_bias], is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN) y = tf.concat([y1, y2], axis=-1) return common_layers.layer_preprocess(y, hparams)
A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor def transformer_revnet_decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder"): """A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor """ def f(x, side_input): """f(x) for reversible layer, self-attention and enc-dec attention.""" decoder_self_attention_bias = side_input[0] encoder_decoder_attention_bias = side_input[1] encoder_output = side_input[2] old_hid_size = hparams.hidden_size hparams.hidden_size = old_hid_size // 2 with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) y = common_layers.layer_postprocess(x, y, hparams) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) y = common_layers.layer_postprocess(x, y, hparams) hparams.hidden_size = old_hid_size return y def g(x): """g(x) for reversible layer, feed-forward layer.""" old_hid_size = hparams.hidden_size hparams.hidden_size = old_hid_size // 2 with tf.variable_scope("ffn"): y = transformer.transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams) y = common_layers.layer_postprocess(x, y, hparams) hparams.hidden_size = old_hid_size return y x1, x2 = tf.split(decoder_input, 2, axis=-1) with tf.variable_scope(name): y1, y2 = tf.contrib.layers.rev_block( x1, x2, f, g, num_layers=hparams.num_hidden_layers, f_side_input=[ decoder_self_attention_bias, encoder_decoder_attention_bias, encoder_output ], is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN) y = tf.concat([y1, y2], axis=-1) return common_layers.layer_preprocess(y, hparams)
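Both stacks rely on the reversible residual coupling behind tf.contrib.layers.rev_block, roughly y1 = x1 + f(x2, side_inputs), y2 = x2 + g(y1), which can be inverted exactly so activations are recomputed instead of stored. A small numpy sketch of the coupling and its inverse, with stand-in f and g rather than the attention/ffn halves above:
import numpy as np

def rev_forward(x1, x2, f, g):
  y1 = x1 + f(x2)
  y2 = x2 + g(y1)
  return y1, y2

def rev_inverse(y1, y2, f, g):
  x2 = y2 - g(y1)
  x1 = y1 - f(x2)
  return x1, x2

f = lambda v: np.tanh(v)  # stand-in for the self-attention half
g = lambda v: 0.5 * v     # stand-in for the feed-forward half
x1, x2 = np.random.randn(2, 3), np.random.randn(2, 3)
y1, y2 = rev_forward(x1, x2, f, g)
r1, r2 = rev_inverse(y1, y2, f, g)
print(np.allclose(x1, r1) and np.allclose(x2, r2))  # True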
Base hparams for TransformerRevnet. def transformer_revnet_base(): """Base hparams for TransformerRevnet.""" hparams = transformer.transformer_big() # Use settings from transformer_n_da hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.learning_rate = 0.4 return hparams
Big hparams for TransformerRevnet. def transformer_revnet_big(): """Big hparams for TransformerRevnet.""" hparams = transformer_revnet_base() # The TransformerRevnet uses significantly less memory than the Transformer. # Increase batch size and model size. hparams.batch_size *= 2 hparams.hidden_size *= 2 hparams.num_heads *= 2 hparams.num_hidden_layers += 1 return hparams
Over which devices do we split each training batch. In old-fashioned async mode, we split the batch over all GPUs on the current worker. In sync mode, we split the batch over all the parameter server GPUs. This function returns an expert_utils.Parallelism object, which can be used to build the model. It is configured in a way that any variables created by `tf.get_variable` will be assigned to the parameter servers and shared between datashards. Args: daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. all_workers: whether the devices are all async workers or just this one. Returns: a expert_utils.Parallelism. def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False): """Over which devices do we split each training batch. In old-fashioned async mode, we split the batch over all GPUs on the current worker. In sync mode, we split the batch over all the parameter server GPUs. This function returns an expert_utils.Parallelism object, which can be used to build the model. It is configured in a way that any variables created by `tf.get_variable` will be assigned to the parameter servers and shared between datashards. Args: daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. all_workers: whether the devices are all async workers or just this one. Returns: a expert_utils.Parallelism. """ dp_arg_names = inspect.getargspec(data_parallelism).args blacklist = ["daisy_chain_variables", "all_workers"] kwargs = {} for arg in dp_arg_names: if arg in blacklist: continue kwargs[arg] = getattr(tf.flags.FLAGS, arg) return data_parallelism( daisy_chain_variables=daisy_chain_variables, all_workers=all_workers, **kwargs)
See data_parallelism_from_flags. def data_parallelism(daisy_chain_variables=True, all_workers=False, ps_replicas=0, ps_job="/job:ps", ps_gpu=0, schedule="continuous_train_and_eval", sync=False, worker_gpu=1, worker_replicas=1, worker_id=0, gpu_order="", worker_job="/job:localhost", no_data_parallelism=False): """See data_parallelism_from_flags.""" tf.logging.info("schedule=%s" % schedule) tf.logging.info("worker_gpu=%s" % worker_gpu) tf.logging.info("sync=%s" % sync) def _ps_replicas(all_workers=False): if all_workers: return list(range(ps_replicas)) # Worker K will be using replicas {0,...n-1} + K*n if we have n replicas. num_replicas = ps_replicas // worker_replicas return [d + worker_id * num_replicas for d in range(num_replicas)] def _gpu_order(num_gpus): if gpu_order: ret = [int(s) for s in gpu_order.split(" ")] if len(ret) == num_gpus: return ret return list(range(num_gpus)) def _ps_gpus(all_workers=False): ps_gpus = [] for d in _ps_replicas(all_workers=all_workers): ps_gpus.extend([(d, gpu) for gpu in _gpu_order(ps_gpu)]) return ps_gpus def ps_devices(all_workers=False): """List of ps devices (where to put the experts). Args: all_workers: whether the list is for all async workers or just this one. Returns: a list of device names """ if ps_replicas > 0: if ps_gpu > 0: return [ ps_job + "/task:%d/GPU:%d" % (d, gpu) for (d, gpu) in _ps_gpus(all_workers=all_workers) ] else: return [ ps_job + "/task:%d" % d for d in _ps_replicas(all_workers=all_workers) ] else: if worker_gpu > 0: return ["gpu:%d" % d for d in _gpu_order(worker_gpu)] else: return [""] def _replica_device_setter(worker_device): if ps_replicas == 0: return worker_device return tf.train.replica_device_setter( worker_device=worker_device, ps_tasks=ps_replicas, ps_device=ps_job + "/GPU:0" if ps_gpu > 0 else ps_job) is_single_machine = ps_replicas == 0 and worker_replicas == 1 if no_data_parallelism: datashard_devices = [""] caching_devices = None elif is_single_machine: tf.logging.warn( "Schedule=%s. Assuming that training is running on a single machine.", schedule) datashard_devices = ["gpu:%d" % d for d in _gpu_order(worker_gpu)] if worker_gpu < 1: datashard_devices += ["cpu:0"] caching_devices = None elif sync and ps_replicas > 0: # compute on ps datashard_devices = [ _replica_device_setter(d) for d in ps_devices(all_workers=all_workers) ] if ps_gpu > 0 and ps_replicas > 1: caching_devices = [ ps_job + "/task:%d/cpu:0" % d for (d, _) in _ps_gpus(all_workers=all_workers) ] else: caching_devices = None else: # compute on worker - this is either a single-worker setup or asynchronous # with parameter servers. if worker_gpu > 1: datashard_devices = [ _replica_device_setter(worker_job + "/GPU:%d" % d) for d in _gpu_order(worker_gpu) ] caching_devices = None else: datashard_devices = [_replica_device_setter(worker_job)] caching_devices = None tf.logging.info("datashard_devices: %s", datashard_devices) tf.logging.info("caching_devices: %s", caching_devices) tf.logging.info("ps_devices: %s", ps_devices(all_workers=all_workers)) return eu.Parallelism( datashard_devices, caching_devices=caching_devices, daisy_chain_variables=daisy_chain_variables, ps_devices=ps_devices(all_workers=all_workers))
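A usage sketch for the common single-machine case (no parameter servers, two local GPUs); model_fn and sharded_features below are hypothetical names:
dp = data_parallelism(worker_gpu=2, ps_replicas=0, worker_replicas=1)
# dp is an expert_utils.Parallelism over ["gpu:0", "gpu:1"]; calling it maps a
# function over the data shards, e.g.
# sharded_logits = dp(model_fn, sharded_features)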
Generate concatenated lines from file up to up_threshold characters. def concat_generator(filename, up_threshold, low_threshold=10): """Generate concatenated lines from file up to up_threshold characters.""" txt = "" for line in tf.gfile.Open(filename): line = line.strip() if len(txt) + len(line) + 1 >= up_threshold: ret = txt txt = "" # We don't yield very short or very long parts to prevent noisy examples. if len(ret) > low_threshold and len(ret) < up_threshold: yield {"targets": ret} if not txt: txt = line else: txt = " ".join([txt, line])
Given python generators, generate from one, then from another, etc. def mix_generators(generator_list): """Given python generators, generate from one, then from another, etc.""" i = 0 l = len(generator_list) stopiters_seen = 0 while stopiters_seen <= l: try: yield six.next(generator_list[i % l]) i += 1 stopiters_seen = 0 except StopIteration: i += 1 stopiters_seen += 1
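A quick illustration with toy generators; iteration ends after a full round of exhausted generators:
def toy_gen(items):
  for item in items:
    yield item

print(list(mix_generators([toy_gen([1, 3, 5]), toy_gen([2, 4])])))
# -> [1, 2, 3, 4, 5]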
Compute BLEU score summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of tf.Summary values if hook_args.decode_hparams contains the reference file and the translated file. def compute_bleu_summaries(hook_args): """Compute BLEU score summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of tf.Summary values if hook_args.decode_hparams contains the reference file and the translated file. """ decode_hparams = hook_args.decode_hparams if not (decode_hparams.decode_reference and decode_hparams.decode_to_file): return None values = [] bleu = 100 * bleu_hook.bleu_wrapper( decode_hparams.decode_reference, decode_hparams.decode_to_file) values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu)) tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu)) if hook_args.hparams.mlperf_mode: current_step = decode_hparams.mlperf_decode_step mlperf_log.transformer_print( key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold) mlperf_log.transformer_print( key=mlperf_log.EVAL_ACCURACY, value={ "epoch": max(current_step // decode_hparams.iterations_per_loop - 1, 0), "value": bleu }) mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP) if bleu >= decode_hparams.mlperf_threshold: decode_hparams.set_hparam("mlperf_success", True) return values
Preprocessing to strip tags in SGM files. def _preprocess_sgm(line, is_sgm): """Preprocessing to strip tags in SGM files.""" if not is_sgm: return line # In SGM files, remove <srcset ...>, <p>, <doc ...> lines. if line.startswith("<srcset") or line.startswith("</srcset"): return "" if line.startswith("<doc") or line.startswith("</doc"): return "" if line.startswith("<p>") or line.startswith("</p>"): return "" # Strip <seg> tags. line = line.strip() if line.startswith("<seg") and line.endswith("</seg>"): i = line.index(">") return line[i + 1:-6]
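Example behavior on typical lines (illustrative inputs):
print(_preprocess_sgm('<seg id="1">Hello world .</seg>', is_sgm=True))  # Hello world .
print(_preprocess_sgm("<p>", is_sgm=True))                              # '' (markup dropped)
print(_preprocess_sgm("plain text", is_sgm=False))                      # returned unchanged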
Concatenates all `datasets` and saves to `filename`. def compile_data(tmp_dir, datasets, filename, datatypes_to_clean=None): """Concatenates all `datasets` and saves to `filename`.""" datatypes_to_clean = datatypes_to_clean or [] filename = os.path.join(tmp_dir, filename) lang1_fname = filename + ".lang1" lang2_fname = filename + ".lang2" if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname): tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname, lang2_fname) return filename with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile: with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile: for dataset in datasets: url = dataset[0] compressed_filename = os.path.basename(url) compressed_filepath = os.path.join(tmp_dir, compressed_filename) if url.startswith("http"): generator_utils.maybe_download(tmp_dir, compressed_filename, url) if dataset[1][0] == "tmx": cleaning_requested = "tmx" in datatypes_to_clean tmx_filename = os.path.join(tmp_dir, dataset[1][1]) if tmx_filename.endswith(".gz"): with gzip.open(tmx_filename, "rb") as tmx_file: _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile, do_cleaning=cleaning_requested) else: with tf.gfile.Open(tmx_filename) as tmx_file: _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile, do_cleaning=cleaning_requested) elif dataset[1][0] == "tsv": _, src_column, trg_column, glob_pattern = dataset[1] filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern)) if not filenames: # Capture *.tgz and *.tar.gz too. mode = "r:gz" if compressed_filepath.endswith("gz") else "r" with tarfile.open(compressed_filepath, mode) as corpus_tar: corpus_tar.extractall(tmp_dir) filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern)) for tsv_filename in filenames: if tsv_filename.endswith(".gz"): new_filename = tsv_filename.strip(".gz") generator_utils.gunzip_file(tsv_filename, new_filename) tsv_filename = new_filename with tf.gfile.Open(tsv_filename) as tsv_file: for line in tsv_file: if line and "\t" in line: parts = line.split("\t") source, target = parts[src_column], parts[trg_column] source, target = source.strip(), target.strip() clean_pairs = [(source, target)] if "tsv" in datatypes_to_clean: clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs) for source, target in clean_pairs: if source and target: lang1_resfile.write(source) lang1_resfile.write("\n") lang2_resfile.write(target) lang2_resfile.write("\n") else: lang1_filename, lang2_filename = dataset[1] lang1_filepath = os.path.join(tmp_dir, lang1_filename) lang2_filepath = os.path.join(tmp_dir, lang2_filename) is_sgm = ( lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm")) if not (tf.gfile.Exists(lang1_filepath) and tf.gfile.Exists(lang2_filepath)): # For .tar.gz and .tgz files, we read compressed. 
mode = "r:gz" if compressed_filepath.endswith("gz") else "r" with tarfile.open(compressed_filepath, mode) as corpus_tar: corpus_tar.extractall(tmp_dir) if lang1_filepath.endswith(".gz"): new_filepath = lang1_filepath.strip(".gz") generator_utils.gunzip_file(lang1_filepath, new_filepath) lang1_filepath = new_filepath if lang2_filepath.endswith(".gz"): new_filepath = lang2_filepath.strip(".gz") generator_utils.gunzip_file(lang2_filepath, new_filepath) lang2_filepath = new_filepath for example in text_problems.text2text_txt_iterator( lang1_filepath, lang2_filepath): line1res = _preprocess_sgm(example["inputs"], is_sgm) line2res = _preprocess_sgm(example["targets"], is_sgm) clean_pairs = [(line1res, line2res)] if "txt" in datatypes_to_clean: clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs) for line1res, line2res in clean_pairs: if line1res and line2res: lang1_resfile.write(line1res) lang1_resfile.write("\n") lang2_resfile.write(line2res) lang2_resfile.write("\n") return filename
Get vocab for distill problems. def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): """Get vocab for distill problems.""" # We assume that vocab file is present in data_dir directory where the # data generated will be stored. vocab_filepath = os.path.join(data_dir, self.vocab_filename) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
Set hparams overrides from unparsed args list. def set_hparams_from_args(args): """Set hparams overrides from unparsed args list.""" if not args: return hp_prefix = "--hp_" tf.logging.info("Found unparsed command-line arguments. Checking if any " "start with %s and interpreting those as hparams " "settings.", hp_prefix) pairs = [] i = 0 while i < len(args): arg = args[i] if arg.startswith(hp_prefix): pairs.append((arg[len(hp_prefix):], args[i+1])) i += 2 else: tf.logging.warn("Found unknown flag: %s", arg) i += 1 as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs]) if FLAGS.hparams: as_hparams = "," + as_hparams FLAGS.hparams += as_hparams
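An illustrative call, assuming FLAGS has already been parsed and FLAGS.hparams is empty beforehand:
set_hparams_from_args(["--hp_learning_rate", "0.05", "--hp_batch_size", "2048"])
# FLAGS.hparams is now "learning_rate=0.05,batch_size=2048".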
Create hparams. def create_hparams(): """Create hparams.""" if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set: tf.logging.warn("Not all hyperparameter sets work on TPU. " "Prefer hparams_sets with a '_tpu' suffix, " "e.g. transformer_tpu, if available for your model.") hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams, hparams_path=hparams_path)
Create a run config. Args: hp: model hyperparameters output_dir: model's output directory, defaults to output_dir flag. Returns: a run config def create_run_config(hp, output_dir=None): """Create a run config. Args: hp: model hyperparameters output_dir: model's output directory, defaults to output_dir flag. Returns: a run config """ save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency) save_ckpt_secs = FLAGS.save_checkpoints_secs or None if save_ckpt_secs: save_ckpt_steps = None assert FLAGS.output_dir or FLAGS.checkpoint_path tpu_config_extra_kwargs = {} if FLAGS.tpu_job_name is not None: tpu_config_extra_kwargs["tpu_job_name"] = FLAGS.tpu_job_name if getattr(hp, "mtf_mode", False): save_ckpt_steps = None # Disable the default saver save_ckpt_secs = None # Disable the default saver tpu_config_extra_kwargs = { "num_cores_per_replica": 1, "per_host_input_for_training": tpu_config.InputPipelineConfig.BROADCAST, } # the various custom getters we have written do not play well together yet. # TODO(noam): ask rsepassi for help here. daisy_chain_variables = ( hp.daisy_chain_variables and hp.activation_dtype == "float32" and hp.weight_dtype == "float32") return trainer_lib.create_run_config( model_name=FLAGS.model, model_dir=output_dir or os.path.expanduser(FLAGS.output_dir), master=FLAGS.master, iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.tpu_num_shards, log_device_placement=FLAGS.log_device_placement, save_checkpoints_steps=save_ckpt_steps, save_checkpoints_secs=save_ckpt_secs, keep_checkpoint_max=FLAGS.keep_checkpoint_max, keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, num_gpus=FLAGS.worker_gpu, gpu_order=FLAGS.gpu_order, num_async_replicas=FLAGS.worker_replicas, gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction, enable_graph_rewriter=FLAGS.enable_graph_rewriter, use_tpu=FLAGS.use_tpu, use_tpu_estimator=FLAGS.use_tpu_estimator, xla_jit_level=FLAGS.xla_jit_level, schedule=FLAGS.schedule, no_data_parallelism=hp.no_data_parallelism, optionally_use_dist_strat=FLAGS.optionally_use_dist_strat, daisy_chain_variables=daisy_chain_variables, ps_replicas=FLAGS.ps_replicas, ps_job=FLAGS.ps_job, ps_gpu=FLAGS.ps_gpu, sync=FLAGS.sync, worker_id=FLAGS.worker_id, worker_job=FLAGS.worker_job, random_seed=FLAGS.random_seed, tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs, inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, log_step_count_steps=FLAGS.log_step_count_steps, intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads, tpu_config_extra_kwargs=tpu_config_extra_kwargs, cloud_tpu_name=FLAGS.cloud_tpu_name)
Saves FLAGS and hparams to output_dir. def save_metadata(hparams): """Saves FLAGS and hparams to output_dir.""" output_dir = os.path.expanduser(FLAGS.output_dir) if not tf.gfile.Exists(output_dir): tf.gfile.MakeDirs(output_dir) # Save FLAGS in txt file if hasattr(FLAGS, "flags_into_string"): flags_str = FLAGS.flags_into_string() t2t_flags_str = "\n".join([ "--%s=%s" % (f.name, f.value) for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] ]) else: flags_dict = FLAGS.__dict__["__flags"] flags_str = "\n".join( ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) t2t_flags_str = None flags_txt = os.path.join(output_dir, "flags.txt") with tf.gfile.Open(flags_txt, "w") as f: f.write(flags_str) if t2t_flags_str: t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") with tf.gfile.Open(t2t_flags_txt, "w") as f: f.write(t2t_flags_str) # Save hparams as hparams.json new_hparams = hparams_lib.copy_hparams(hparams) # Modality class is not JSON serializable so remove. new_hparams.del_hparam("modality") hparams_fname = os.path.join(output_dir, "hparams.json") with tf.gfile.Open(hparams_fname, "w") as f: f.write(new_hparams.to_json(indent=0, sort_keys=True))
A stack of convolution blocks with residual connection. def residual_block(x, hparams): """A stack of convolution blocks with residual connection.""" k = (hparams.kernel_height, hparams.kernel_width) dilations_and_kernels = [((1, 1), k) for _ in range(3)] y = common_layers.subseparable_conv_block( x, hparams.hidden_size, dilations_and_kernels, padding="SAME", separability=0, name="residual_block") x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm") return tf.nn.dropout(x, 1.0 - hparams.dropout)
Xception body. def xception_internal(inputs, hparams): """Xception body.""" with tf.variable_scope("xception"): cur = inputs if cur.get_shape().as_list()[1] > 200: # Large image, Xception entry flow cur = xception_entry(cur, hparams.hidden_size) else: # Small image, conv cur = common_layers.conv_block( cur, hparams.hidden_size, [((1, 1), (3, 3))], first_relu=False, padding="SAME", force2d=True, name="small_image_conv") for i in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % i): cur = residual_block(cur, hparams) return xception_exit(cur)
Xception entry flow. def xception_entry(inputs, hidden_dim): """Xception entry flow.""" with tf.variable_scope("xception_entry"): def xnet_resblock(x, filters, res_relu, name): """Resblock.""" with tf.variable_scope(name): y = common_layers.separable_conv_block( x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding="SAME", force2d=True, name="sep_conv_block") y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) return y + common_layers.conv_block( x, filters, [((1, 1), (1, 1))], padding="SAME", strides=(2, 2), first_relu=res_relu, force2d=True, name="res_conv0") tf.summary.image("inputs", inputs, max_outputs=2) x = common_layers.conv_block( inputs, 32, [((1, 1), (3, 3))], first_relu=False, padding="SAME", strides=(2, 2), force2d=True, name="conv0") x = common_layers.conv_block( x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1") x = xnet_resblock(x, min(128, hidden_dim), True, "block0") x = xnet_resblock(x, min(256, hidden_dim), False, "block1") return xnet_resblock(x, hidden_dim, False, "block2")
Xception exit flow. def xception_exit(inputs): """Xception exit flow.""" with tf.variable_scope("xception_exit"): x = inputs x_shape = x.get_shape().as_list() if x_shape[1] is None or x_shape[2] is None: length_float = tf.to_float(tf.shape(x)[1]) length_float *= tf.to_float(tf.shape(x)[2]) spatial_dim_float = tf.sqrt(length_float) spatial_dim = tf.to_int32(spatial_dim_float) x_depth = x_shape[3] x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) elif x_shape[1] != x_shape[2]: spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2]))) if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]: raise ValueError("Assumed inputs were square-able but they were " "not. Shape: %s" % x_shape) x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME") return tf.nn.relu(x)
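The non-square branch only works when the product of the spatial dimensions is a perfect square; a quick arithmetic check of that condition with illustrative shapes:
import math

h, w = 8, 32
spatial_dim = int(math.sqrt(float(h * w)))  # 16
print(spatial_dim * spatial_dim == h * w)   # True: reshape to 16x16 is valid
print(int(math.sqrt(6 * 8)) ** 2 == 6 * 8)  # False: this shape would raise ValueError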
Returns a plaintext representation of HTML content. def get_text_from_html(html): """Returns a plaintext representation of HTML content.""" try: soup = bs4.BeautifulSoup(html, "html.parser") except: # pylint: disable=bare-except # Some docs don't parse return "" # Remove script and style tags for s in soup(["script", "style"]): s.decompose() return "\n".join([s for s in _soup_strings(soup)])
Return text strings in soup. def _soup_strings(soup): """Return text strings in soup.""" paragraph_tags = set([ "caption", "details", "h1", "h2", "h3", "h4", "h5", "h6", "li", "p", "td", "div", "span" ]) skip_children = None for descendant in soup.descendants: # If we've treated a tag as a contiguous paragraph, don't re-emit the # children (see below). if skip_children is not None: try: in_skip = descendant in skip_children # pylint: disable=unsupported-membership-test except RecursionError: # pylint: disable=undefined-variable # Possible for this check to hit a nasty infinite recursion because of # BeautifulSoup __eq__ checks. in_skip = True if in_skip: continue else: skip_children = None # Treat some tags as contiguous paragraphs, regardless of other tags nested # inside (like <a> or <b>). if isinstance(descendant, bs4.Tag): if descendant.name in paragraph_tags: if descendant.find_all(paragraph_tags): # If there are nested paragraph tags, don't treat it as a single # contiguous tag. continue skip_children = list(descendant.descendants) text = " ".join(descendant.get_text(" ", strip=True).split()) if text: yield text continue if (isinstance(descendant, bs4.Comment) or not isinstance(descendant, bs4.NavigableString)): continue text = " ".join(descendant.strip().split()) if text: yield text
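A small end-to-end example of the HTML-to-text extraction (requires bs4; expected output shown in comments):
html = ("<html><body><h1>Title</h1>"
        "<p>First <b>bold</b> line.</p>"
        "<script>var x = 1;</script>"
        "<p>Second line.</p></body></html>")
print(get_text_from_html(html))
# Title
# First bold line.
# Second line.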
Set of hyperparameters. def image_transformer_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.hidden_size = 512 hparams.batch_size = 4 hparams.max_length = 3075 hparams.dropout = 0.0 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 4000 hparams.initializer_gain = 0.2 hparams.num_hidden_layers = 6 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.98 hparams.label_smoothing = 0.0 hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom hparams.top["targets"] = modalities.identity_top hparams.norm_type = "layer" hparams.layer_prepostprocess_dropout = 0.0 hparams.add_hparam("filter_size", 512) # Add new ones like this. # attention-related flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("ffn_layer", "conv_hidden_relu") # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. hparams.add_hparam("attention_dropout", 0.0) hparams.add_hparam("relu_dropout", 0.0) hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam("nbr_decoder_problems", 1) hparams.add_hparam("num_output_layers", 3) hparams.add_hparam("block_size", 1) # dilated attention based flags hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64]) # image size related flags # assuming that the image has same height and width hparams.add_hparam("img_len", 32) hparams.add_hparam("num_channels", 3) # Local attention params hparams.add_hparam("local_and_global_att", False) hparams.add_hparam("block_length", 256) hparams.add_hparam("block_width", 128) hparams.add_hparam("num_encoder_layers", 4) hparams.add_hparam("num_decoder_layers", 12) hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D) hparams.add_hparam("block_raster_scan", False) # multipos attention params hparams.add_hparam("q_filter_width", 1) hparams.add_hparam("kv_filter_width", 1) hparams.add_hparam("likelihood", cia.DistributionType.CAT) hparams.add_hparam("unconditional", False) # unconditional generation # parameters of discretized mixture of logistics loss from pixel cnn++ hparams.add_hparam("num_mixtures", 10) # These parameters are only used when ffn_layer=="local_moe_tpu" hparams.add_hparam("moe_overhead_train", 1.0) hparams.add_hparam("moe_overhead_eval", 2.0) hparams.moe_num_experts = 8 hparams.moe_loss_coef = 1e-3 # These parameters are for relative attention hparams.add_hparam("shared_rel", False) # share relative embeddings return hparams
Best config for 2.90 bits/dim on CIFAR10 using cross entropy. def imagetransformer_cifar10_base(): """Best config for 2.90 bits/dim on CIFAR10 using cross entropy.""" hparams = image_transformer_base() hparams.batch_size = 4 hparams.num_heads = 4 hparams.num_decoder_layers = 12 hparams.block_length = 256 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.learning_rate = 0.5 hparams.learning_rate_warmup_steps = 4000 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 hparams.unconditional = True return hparams
Best config for 2.90 bits/dim on CIFAR10 using DMOL. def imagetransformer_cifar10_base_dmol(): """Best config for 2.90 bits/dim on CIFAR10 using DMOL.""" hparams = image_transformer_base() hparams.likelihood = cia.DistributionType.DMOL hparams.num_channels = 1 hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom hparams.top["targets"] = modalities.identity_top hparams.num_heads = 8 hparams.batch_size = 8 hparams.sampling_method = "random" hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.summarize_grads = True hparams.hidden_size = 256 hparams.filter_size = 512 hparams.attention_key_channels = 512 hparams.attention_value_channels = 512 hparams.num_decoder_layers = 12 hparams.layer_prepostprocess_dropout = 0.1 hparams.learning_rate = 0.1 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.pos = "emb" hparams.unconditional = True return hparams
Transformer base params for cifar-10. def imagetransformer_base_tpu(): """Transformer base params for cifar-10.""" hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 12 hparams.block_length = 128 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.learning_rate = 0.2 hparams.learning_rate_warmup_steps = 6000 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 return hparams
Transformer base params for imagenet. def imagetransformer_base_imagenet_tpu(): """Transformer base params for imagenet.""" hparams = imagetransformer_base_tpu() hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 12 hparams.block_length = 128 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.1 return hparams
separate rgb embeddings. def imagetransformer_sep_channels(): """separate rgb embeddings.""" hparams = imagetransformer_base() hparams.num_heads = 4 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.hidden_size = 256 hparams.filter_size = 512 hparams.num_hidden_layers = 6 return hparams
separate rgb embeddings. def imagetransformer_sep_channels_8l(): """separate rgb embeddings.""" hparams = imagetransformer_base() hparams.num_heads = 4 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.hidden_size = 256 hparams.filter_size = 256 hparams.num_hidden_layers = 8 hparams.sampling_method = "random" return hparams
big 1d model for conditional image generation; 2.99 bits/dim on cifar10. def imagetransformer_base_8l_8h_big_cond_dr03_dan(): """big 1d model for conditional image generation; 2.99 bits/dim on cifar10.""" hparams = imagetransformer_sep_channels_8l() hparams.block_width = 256 hparams.block_length = 256 hparams.hidden_size = 512 hparams.num_heads = 8 hparams.filter_size = 2048 hparams.batch_size = 4 hparams.max_length = 3075 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.num_decoder_layers = 8 hparams.layer_prepostprocess_dropout = 0.3 return hparams
big 1d model for unconditional generation on imagenet. def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64(): """big 1d model for unconditional generation on imagenet.""" hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan() hparams.unconditional = True hparams.max_length = 14000 hparams.batch_size = 1 hparams.img_len = 64 hparams.layer_prepostprocess_dropout = 0.1 return hparams
separate rgb embeddings. def imagetransformerpp_sep_channels_8l_8h(): """separate rgb embeddings.""" hparams = imagetransformer_base() hparams.likelihood = cia.DistributionType.DMOL hparams.num_channels = 1 hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom hparams.top["targets"] = modalities.identity_top hparams.num_heads = 8 hparams.batch_size = 4 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.hidden_size = 512 hparams.filter_size = 512 hparams.num_hidden_layers = 8 hparams.sampling_method = "random" hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.summarize_grads = True hparams.learning_rate = 0.1 return hparams
big 1d model for conditional image generation; 2.99 bits/dim on cifar10. def imagetransformerpp_base_8l_8h_big_cond_dr03_dan(): """big 1d model for conditional image generation; 2.99 bits/dim on cifar10.""" hparams = imagetransformerpp_sep_channels_8l_8h() hparams.hidden_size = 512 hparams.num_heads = 8 hparams.filter_size = 2048 hparams.batch_size = 4 hparams.max_length = 3075 hparams.layer_prepostprocess_dropout = 0.3 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.summarize_grads = True hparams.learning_rate = 0.01 return hparams
Gets to 2.92 in just under 4 days on 8 p100s. def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p(): """Gets to 2.92 in just under 4 days on 8 p100s.""" hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l() hparams.num_decoder_layers = 14 hparams.batch_size = 8 hparams.layer_prepostprocess_dropout = 0.2 return hparams
For 256x256. def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1(): """For 256x256.""" hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() # TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in # image transformer training implementation? # hparams.img_len = 256 hparams.max_length = 66000 # allow for 256x256 hparams.batch_size = 1 hparams.num_decoder_layers = 5 hparams.hidden_size = 128 hparams.filter_size = 128 hparams.attention_key_channels = 64 hparams.attention_value_channels = 64 hparams.layer_prepostprocess_dropout = 0.0 return hparams
Dilated hparams. def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated(): """Dilated hparams.""" hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan() hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0] hparams.dec_attention_type = cia.AttentionType.DILATED hparams.block_length = 128 hparams.block_width = 128 hparams.add_hparam("num_memory_blocks", 1) return hparams
big 1d model for conditional image generation. def imagetransformer_base_12l_8h_big(): """big 1d model for conditional image generation.""" hparams = imagetransformer_sep_channels_8l_8h() hparams.filter_size = 1024 hparams.num_decoder_layers = 12 hparams.batch_size = 1 hparams.hidden_size = 512 hparams.learning_rate_warmup_steps = 4000 hparams.sampling_method = "random" hparams.beam_size = 1 hparams.block_width = 256 return hparams
hparams for 8-layer big 1d model for imagenet 64x64. def imagetransformer1d_base_8l_64by64(): """hparams for 8-layer big 1d model for imagenet 64x64.""" hparams = image_transformer_base() hparams.num_heads = 8 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.num_decoder_layers = 8 hparams.batch_size = 1 hparams.block_length = 512 hparams.block_width = 768 hparams.layer_prepostprocess_dropout = 0.1 hparams.max_length = 14000 hparams.unconditional = int(False) return hparams
separate rgb embeddings. def imagetransformer_sep_channels_12l_16h_imagenet_large(): """separate rgb embeddings.""" hparams = imagetransformer_sep_channels_8l_8h() hparams.num_hidden_layers = 12 hparams.batch_size = 1 hparams.filter_size = 2048 hparams.num_heads = 16 hparams.learning_rate_warmup_steps = 16000 hparams.sampling_method = "random" hparams.learning_rate = 0.1 return hparams
separate rgb embeddings. def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc(): """separate rgb embeddings.""" hparams = imagetransformer_sep_channels_12l_16h_imagenet_large() hparams.num_hidden_layers = 16 hparams.local_attention = True hparams.batch_size = 1 hparams.block_length = 256 return hparams
separate rgb embeddings. def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128(): """separate rgb embeddings.""" hparams = imagetransformer_sep_channels_12l_16h_imagenet_large() hparams.num_hidden_layers = 16 hparams.local_attention = True hparams.batch_size = 1 hparams.block_length = 128 return hparams
big 1d model for conditional image generation. def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet(): """big 1d model for conditional image generation.""" hparams = imagetransformer_base_14l_8h_big_dr01() # num_hidden_layers hparams.num_decoder_layers = 10 hparams.num_heads = 16 hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.batch_size = 1 hparams.layer_prepostprocess_dropout = 0.1 return hparams
big 1d model for conditional image generation. def imagetransformer_base_10l_16h_big_dr01_imgnet(): """big 1d model for conditional image generation.""" hparams = imagetransformer_base_14l_8h_big_dr01() # num_hidden_layers hparams.num_decoder_layers = 10 hparams.num_heads = 16 hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.batch_size = 1 hparams.unconditional = False hparams.layer_prepostprocess_dropout = 0.1 return hparams
separate rgb embeddings. def imagetransformer_sep_channels_8l_8h(): """separate rgb embeddings.""" hparams = imagetransformer_base() hparams.num_heads = 8 hparams.batch_size = 1 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.hidden_size = 512 hparams.filter_size = 512 hparams.num_hidden_layers = 8 hparams.sampling_method = "random" return hparams
separate rgb embeddings. def imagetransformer_sep_channels_8l_8h_local_and_global_att(): """separate rgb embeddings.""" hparams = imagetransformer_sep_channels_8l_8h() hparams.num_heads = 8 hparams.batch_size = 1 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.hidden_size = 256 hparams.filter_size = 256 hparams.num_hidden_layers = 4 hparams.sampling_method = "random" hparams.local_and_global_att = True return hparams
big 1d model for conditional image generation. def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet(): """big 1d model for conditional image generation.""" hparams = imagetransformer_base_14l_8h_big_dr01() # num_hidden_layers hparams.num_decoder_layers = 8 hparams.num_heads = 8 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.layer_prepostprocess_dropout = 0.3 return hparams
big 1d model for conditional image generation. def imagetransformer_base_10l_16h_big_dr01_moe_imgnet(): """big 1d model for conditional image generation.""" hparams = imagetransformer_base_10l_16h_big_dr01_imgnet() hparams.initializer = "orthogonal" hparams.learning_rate_warmup_steps = 16000 hparams.add_hparam("moe_layers_decoder", "2,7") # Which layer is MoE. hparams.moe_hidden_sizes = "4096" # Hidden layer sizes (comma-separated). hparams.moe_num_experts = 64 # Number of experts in each MoE layer. hparams.moe_k = 4 # How many experts to use per batch element (try 2 or 4). hparams.moe_loss_coef = 3e-2 # MoE loss coefficient (1e-2 is usually ok). hparams.scheduled_sampling_prob = 0.1 hparams.scheduled_sampling_warmup_steps = 200000 return hparams
Set of hyperparameters for a very small imagetransformer with MoE. def imagetransformer_moe_tiny(): """Set of hyperparameters for a very small imagetransformer with MoE.""" hparams = imagetransformer_tiny() hparams.hidden_size = 64 hparams.batch_size = 1 hparams.num_hidden_layers = 3 hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE. hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated). hparams.moe_num_experts = 16 # Number of experts in each MoE layer. hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4). hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok). return hparams
Hparams for training imagetransformer on tpu. def imagetransformer_sep_channels_8l_tpu(): """Hparams for training imagetransformer on tpu.""" hparams = imagetransformer_sep_channels_8l() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 # heads are expensive on tpu hparams.shared_embedding_and_softmax_weights = False return hparams
Small model for tpu cifar 10.
def imagetransformer_b10l_4h_big_uncond_dr03_tpu():
  """Small model for tpu cifar 10."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4  # heads are expensive on tpu
  hparams.num_decoder_layers = 10
  hparams.block_length = 128
  hparams.hidden_size = 512
  hparams.filter_size = 1024
  hparams.learning_rate = 0.2
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  return hparams
Moe tpu params.
def imagetransformer_b10l_dr03_moe_tpu():
  """Moe tpu params."""
  hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4  # heads are expensive on tpu
  hparams.num_decoder_layers = 10
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.ffn_layer = "local_moe_tpu"
  return hparams
TPU related small model.
def imagetransformer_b10l_4h_big_uncond_dr03_lr025_tpu():
  """TPU related small model."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4  # heads are expensive on tpu
  hparams.num_decoder_layers = 10
  hparams.learning_rate = 0.25
  hparams.learning_rate_warmup_steps = 8000
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  # hparams.unconditional = True
  return hparams
works very well on 4x4.
def imagetransformer_b12l_4h_b256_uncond_dr03_tpu():
  """works very well on 4x4."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4  # heads are expensive on tpu
  hparams.num_decoder_layers = 12
  hparams.block_length = 256
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.learning_rate = 0.5
  hparams.learning_rate_warmup_steps = 4000
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.3
  hparams.unconditional = True
  return hparams
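Each of these functions simply returns a populated HParams object, so the settings a derived config resolves to after all the chained overrides can be inspected by calling it directly. A short sketch, assuming the module path tensor2tensor.models.image_transformer (an assumption, not stated in this excerpt):
# Dump the resolved settings of the 12-layer TPU config and spot-check the
# overrides it applies on top of the base imgnet config.
from tensor2tensor.models import image_transformer

hparams = image_transformer.imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
for name, value in sorted(hparams.values().items()):
  print("%s = %s" % (name, value))

assert hparams.num_decoder_layers == 12
assert hparams.block_length == 256
assert hparams.unconditional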
works very well on 4x4.
def imagetransformer_b12l_4h_b256_uncond_dr03_rel_tpu():
  """works very well on 4x4."""
  hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
  hparams.shared_rel = True
  hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
  return hparams
Range of hyperparameters for vizier.
def imagetransformer_cifar_tpu_range(rhp):
  """Range of hyperparameters for vizier."""
  # After starting from base, set intervals for some parameters.
  rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE)
  rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16])
  rhp.set_discrete("hidden_size", [256, 512, 1024])
  rhp.set_discrete("block_length", [128, 256, 512])
  rhp.set_categorical("dec_attention_type", [
      cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
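The range function above only calls setter methods on the rhp object it is handed, so its search space can be listed without a Vizier backend by passing in a small stand-in object. A sketch; the RangeRecorder class is hypothetical and only mimics the three setters and the LOG_SCALE constant used above, and the module path is assumed as before:
from tensor2tensor.models import image_transformer


class RangeRecorder(object):
  """Hypothetical stand-in for a RangedHParams object: it just records calls."""

  LOG_SCALE = "log"  # Mimics the scale constant referenced above.

  def __init__(self):
    self.ranges = []

  def set_float(self, name, low, high, scale=None):
    self.ranges.append((name, "float", (low, high), scale))

  def set_discrete(self, name, values):
    self.ranges.append((name, "discrete", tuple(values), None))

  def set_categorical(self, name, categories):
    self.ranges.append((name, "categorical", tuple(categories), None))


rhp = RangeRecorder()
image_transformer.imagetransformer_cifar_tpu_range(rhp)
for entry in rhp.ranges:
  print(entry)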
TPU related imagenet model.
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im():
  """TPU related imagenet model."""
  hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 6000
  hparams.layer_prepostprocess_dropout = 0.1
  return hparams
TPU related small model.
def imagetransformer_b12l_4h_uncond_dr03_tpu():
  """TPU related small model."""
  hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
  hparams.learning_rate = 0.2
  hparams.learning_rate_warmup_steps = 4000
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.3
  return hparams
TPU config for cifar 10.
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
  """TPU config for cifar 10."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 2
  hparams.num_heads = 4  # heads are expensive on tpu
  hparams.num_decoder_layers = 12
  hparams.block_length = 128
  hparams.hidden_size = 256
  hparams.filter_size = 2048
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000
  return hparams
TPU related 12 layer 8 heads model.
def imagetransformer_b12l_8h_b256_uncond_dr03_tpu():
  """TPU related 12 layer 8 heads model."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 2
  hparams.num_heads = 8  # heads are expensive on tpu
  hparams.num_decoder_layers = 12
  hparams.block_length = 256
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.3
  return hparams
big 1d model for conditional image generation.
def imagetransformer_b10l_4h_big_uncond_dr01_tpu():
  """big 1d model for conditional image generation."""
  hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
  # num_hidden_layers
  hparams.num_decoder_layers = 10
  hparams.num_heads = 4
  hparams.hidden_size = 1024
  hparams.filter_size = 4096
  hparams.batch_size = 1
  hparams.layer_prepostprocess_dropout = 0.1
  return hparams
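With this many closely related variants, it can help to see exactly which fields two configs disagree on; since HParams.values() returns a plain dict, a few lines are enough. This is a sketch only, with the same assumed module path as above:
# Diff two of the TPU configs defined in this file.
from tensor2tensor.models import image_transformer

a = image_transformer.imagetransformer_b12l_4h_b256_uncond_dr03_tpu().values()
b = image_transformer.imagetransformer_b10l_4h_big_uncond_dr01_tpu().values()
for key in sorted(set(a) | set(b)):
  if a.get(key) != b.get(key):
    print("%s: %s -> %s" % (key, a.get(key), b.get(key)))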