def get_current_step(self):
"""
Gets current model step.
:return: current model step.
"""
step = self.sess.run(self.model.global_step)
return step
def save_model(self, steps):
"""
Saves the model
:param steps: The number of steps the model was trained for
:return:
"""
with self.graph.as_default():
last_checkpoint = self.model_path + '/model-' + str(steps) + '.cptk'
self.saver.save(self.sess, last_checkpoint)
tf.train.write_graph(self.graph, self.model_path,
'raw_graph_def.pb', as_text=False)
def export_model(self):
"""
Exports latest saved model to .nn format for Unity embedding.
"""
with self.graph.as_default():
target_nodes = ','.join(self._process_graph())
ckpt = tf.train.get_checkpoint_state(self.model_path)
freeze_graph.freeze_graph(
input_graph=self.model_path + '/raw_graph_def.pb',
input_binary=True,
input_checkpoint=ckpt.model_checkpoint_path,
output_node_names=target_nodes,
output_graph=(self.model_path + '/frozen_graph_def.pb'),
clear_devices=True, initializer_nodes='', input_saver='',
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0')
tf2bc.convert(self.model_path + '/frozen_graph_def.pb', self.model_path + '.nn')
logger.info('Exported ' + self.model_path + '.nn file')
def _process_graph(self):
"""
Gets the list of the output nodes present in the graph for inference
:return: list of node names
"""
all_nodes = [x.name for x in self.graph.as_graph_def().node]
nodes = [x for x in all_nodes if x in self.possible_output_nodes]
    logger.info('List of nodes to export for brain: ' + self.brain.brain_name)
for n in nodes:
logger.info('\t' + n)
return nodes
def reset_local_buffers(self):
"""
    Resets all the local buffers.
"""
agent_ids = list(self.keys())
for k in agent_ids:
self[k].reset_agent()
def append_update_buffer(self, agent_id, key_list=None, batch_size=None, training_length=None):
"""
Appends the buffer of an agent to the update buffer.
    :param agent_id: The id of the agent whose data will be appended
:param key_list: The fields that must be added. If None: all fields will be appended.
:param batch_size: The number of elements that must be appended. If None: All of them will be.
:param training_length: The length of the samples that must be appended. If None: only takes one element.
"""
if key_list is None:
key_list = self[agent_id].keys()
if not self[agent_id].check_length(key_list):
        raise BufferException("The length of the fields {0} for agent {1} were not of the same length"
                              .format(key_list, agent_id))
for field_key in key_list:
self.update_buffer[field_key].extend(
self[agent_id][field_key].get_batch(batch_size=batch_size, training_length=training_length)
)
def append_all_agent_batch_to_update_buffer(self, key_list=None, batch_size=None, training_length=None):
"""
Appends the buffer of all agents to the update buffer.
:param key_list: The fields that must be added. If None: all fields will be appended.
:param batch_size: The number of elements that must be appended. If None: All of them will be.
:param training_length: The length of the samples that must be appended. If None: only takes one element.
"""
for agent_id in self.keys():
self.append_update_buffer(agent_id, key_list, batch_size, training_length)
def run_training(sub_id: int, run_seed: int, run_options, process_queue):
"""
Launches training session.
:param process_queue: Queue used to send signal back to main.
:param sub_id: Unique id for training session.
:param run_seed: Random seed used for training.
:param run_options: Command line arguments for training.
"""
# Docker Parameters
docker_target_name = (run_options['--docker-target-name']
if run_options['--docker-target-name'] != 'None' else None)
# General parameters
env_path = (run_options['--env']
if run_options['--env'] != 'None' else None)
run_id = run_options['--run-id']
load_model = run_options['--load']
train_model = run_options['--train']
save_freq = int(run_options['--save-freq'])
keep_checkpoints = int(run_options['--keep-checkpoints'])
base_port = int(run_options['--base-port'])
num_envs = int(run_options['--num-envs'])
curriculum_folder = (run_options['--curriculum']
if run_options['--curriculum'] != 'None' else None)
lesson = int(run_options['--lesson'])
fast_simulation = not bool(run_options['--slow'])
no_graphics = run_options['--no-graphics']
trainer_config_path = run_options['<trainer-config-path>']
# Recognize and use docker volume if one is passed as an argument
if not docker_target_name:
model_path = './models/{run_id}-{sub_id}'.format(run_id=run_id, sub_id=sub_id)
summaries_dir = './summaries'
else:
trainer_config_path = \
'/{docker_target_name}/{trainer_config_path}'.format(
docker_target_name=docker_target_name,
trainer_config_path=trainer_config_path)
if curriculum_folder is not None:
curriculum_folder = \
'/{docker_target_name}/{curriculum_folder}'.format(
docker_target_name=docker_target_name,
curriculum_folder=curriculum_folder)
model_path = '/{docker_target_name}/models/{run_id}-{sub_id}'.format(
docker_target_name=docker_target_name,
run_id=run_id,
sub_id=sub_id)
summaries_dir = '/{docker_target_name}/summaries'.format(
docker_target_name=docker_target_name)
trainer_config = load_config(trainer_config_path)
env_factory = create_environment_factory(
env_path,
docker_target_name,
no_graphics,
run_seed,
base_port + (sub_id * num_envs)
)
env = SubprocessUnityEnvironment(env_factory, num_envs)
maybe_meta_curriculum = try_create_meta_curriculum(curriculum_folder, env)
# Create controller and begin training.
tc = TrainerController(model_path, summaries_dir, run_id + '-' + str(sub_id),
save_freq, maybe_meta_curriculum,
load_model, train_model,
keep_checkpoints, lesson, env.external_brains,
run_seed, fast_simulation)
# Signal that environment has been launched.
process_queue.put(True)
# Begin training
tc.start_learning(env, trainer_config)
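
# An illustrative docopt-style run_options dict as consumed by run_training above.
# The keys match the options the function reads; the concrete values are example
# assumptions for demonstration, not defaults taken from the source.
example_run_options = {
    '--docker-target-name': 'None',
    '--env': 'None',
    '--run-id': 'ppo',
    '--load': False,
    '--train': True,
    '--save-freq': '50000',
    '--keep-checkpoints': '5',
    '--base-port': '5005',
    '--num-envs': '1',
    '--curriculum': 'None',
    '--lesson': '0',
    '--slow': False,
    '--no-graphics': False,
    '<trainer-config-path>': 'config/trainer_config.yaml',
}
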
def get_action(self, curr_info: BrainInfo) -> ActionInfo:
"""
Get an action using this trainer's current policy.
:param curr_info: Current BrainInfo.
:return: The ActionInfo given by the policy given the BrainInfo.
"""
self.trainer_metrics.start_experience_collection_timer()
action = self.policy.get_action(curr_info)
self.trainer_metrics.end_experience_collection_timer()
return action
def write_summary(self, global_step, delta_train_start, lesson_num=0):
"""
Saves training statistics to Tensorboard.
:param delta_train_start: Time elapsed since training started.
:param lesson_num: Current lesson number in curriculum.
:param global_step: The number of steps the simulation has been going for
"""
if global_step % self.trainer_parameters['summary_freq'] == 0 and global_step != 0:
is_training = "Training." if self.is_training and self.get_step <= self.get_max_steps else "Not Training."
if len(self.stats['Environment/Cumulative Reward']) > 0:
            mean_reward = np.mean(self.stats['Environment/Cumulative Reward'])
            LOGGER.info(" {}: {}: Step: {}. Time Elapsed: {:0.3f} s "
                        "Mean Reward: {:0.3f}. Std of Reward: {:0.3f}. {}"
                        .format(self.run_id, self.brain_name,
                                min(self.get_step, self.get_max_steps),
                                delta_train_start, mean_reward,
                                np.std(self.stats['Environment/Cumulative Reward']),
                                is_training))
else:
LOGGER.info(" {}: {}: Step: {}. No episode was completed since last summary. {}"
.format(self.run_id, self.brain_name, self.get_step, is_training))
summary = tf.Summary()
for key in self.stats:
if len(self.stats[key]) > 0:
stat_mean = float(np.mean(self.stats[key]))
summary.value.add(tag='{}'.format(
key), simple_value=stat_mean)
self.stats[key] = []
summary.value.add(tag='Environment/Lesson', simple_value=lesson_num)
self.summary_writer.add_summary(summary, self.get_step)
self.summary_writer.flush()
def write_tensorboard_text(self, key, input_dict):
"""
Saves text to Tensorboard.
Note: Only works on tensorflow r1.2 or above.
:param key: The name of the text.
:param input_dict: A dictionary that will be displayed in a table on Tensorboard.
"""
try:
with tf.Session() as sess:
s_op = tf.summary.text(key, tf.convert_to_tensor(
([[str(x), str(input_dict[x])] for x in input_dict])))
s = sess.run(s_op)
self.summary_writer.add_summary(s, self.get_step)
    except Exception:
        LOGGER.info(
            "Cannot write text summary for Tensorboard. Tensorflow version must be r1.2 or above.")
def lesson_nums(self):
"""A dict from brain name to the brain's curriculum's lesson number."""
lesson_nums = {}
for brain_name, curriculum in self.brains_to_curriculums.items():
lesson_nums[brain_name] = curriculum.lesson_num
return lesson_nums
def increment_lessons(self, measure_vals, reward_buff_sizes=None):
"""Attempts to increments all the lessons of all the curriculums in this
MetaCurriculum. Note that calling this method does not guarantee the
lesson of a curriculum will increment. The lesson of a curriculum will
only increment if the specified measure threshold defined in the
curriculum has been reached and the minimum number of episodes in the
lesson have been completed.
Args:
measure_vals (dict): A dict of brain name to measure value.
reward_buff_sizes (dict): A dict of brain names to the size of their
corresponding reward buffers.
Returns:
A dict from brain name to whether that brain's lesson number was
incremented.
"""
ret = {}
if reward_buff_sizes:
for brain_name, buff_size in reward_buff_sizes.items():
if self._lesson_ready_to_increment(brain_name, buff_size):
measure_val = measure_vals[brain_name]
ret[brain_name] = (self.brains_to_curriculums[brain_name]
.increment_lesson(measure_val))
else:
for brain_name, measure_val in measure_vals.items():
ret[brain_name] = (self.brains_to_curriculums[brain_name]
.increment_lesson(measure_val))
return ret
def set_all_curriculums_to_lesson_num(self, lesson_num):
"""Sets all the curriculums in this meta curriculum to a specified
lesson number.
Args:
lesson_num (int): The lesson number which all the curriculums will
be set to.
"""
for _, curriculum in self.brains_to_curriculums.items():
curriculum.lesson_num = lesson_num
def get_config(self):
"""Get the combined configuration of all curriculums in this
MetaCurriculum.
Returns:
A dict from parameter to value.
"""
config = {}
for _, curriculum in self.brains_to_curriculums.items():
curr_config = curriculum.get_config()
config.update(curr_config)
return config
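
# A toy illustration of the merge semantics of get_config above: dict.update lets a
# curriculum iterated later overwrite a parameter set by an earlier one. The
# parameter names below are made up for the example.
merged = {}
for curr_config in ({'wall_height': 1.5}, {'wall_height': 4.0, 'gravity': 9.8}):
    merged.update(curr_config)
assert merged == {'wall_height': 4.0, 'gravity': 9.8}
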
def reset(self, config=None, train_mode=True, custom_reset_parameters=None) -> AllBrainInfo:
"""
    Sends a signal to reset the Unity environment.
:return: AllBrainInfo : A data structure corresponding to the initial reset state of the environment.
"""
if config is None:
config = self._resetParameters
elif config:
logger.info("Academy reset with parameters: {0}"
.format(', '.join([str(x) + ' -> ' + str(config[x]) for x in config])))
for k in config:
if (k in self._resetParameters) and (isinstance(config[k], (int, float))):
self._resetParameters[k] = config[k]
elif not isinstance(config[k], (int, float)):
                raise UnityEnvironmentException(
                    "The value for parameter '{0}' must be an Integer or a Float.".format(k))
else:
raise UnityEnvironmentException(
"The parameter '{0}' is not a valid parameter.".format(k))
if self._loaded:
outputs = self.communicator.exchange(
self._generate_reset_input(train_mode, config, custom_reset_parameters)
)
if outputs is None:
raise KeyboardInterrupt
rl_output = outputs.rl_output
s = self._get_state(rl_output)
self._global_done = s[1]
for _b in self._external_brain_names:
self._n_agents[_b] = len(s[0][_b].agents)
return s[0]
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
def step(self, vector_action=None, memory=None, text_action=None, value=None, custom_action=None) -> AllBrainInfo:
"""
Provides the environment with an action, moves the environment dynamics forward accordingly,
and returns observation, state, and reward information to the agent.
:param value: Value estimates provided by agents.
:param vector_action: Agent's vector action. Can be a scalar or vector of int/floats.
:param memory: Vector corresponding to memory used for recurrent policies.
:param text_action: Text action to send to environment for.
:param custom_action: Optional instance of a CustomAction protobuf message.
:return: AllBrainInfo : A Data structure corresponding to the new state of the environment.
"""
vector_action = {} if vector_action is None else vector_action
memory = {} if memory is None else memory
text_action = {} if text_action is None else text_action
value = {} if value is None else value
custom_action = {} if custom_action is None else custom_action
# Check that environment is loaded, and episode is currently running.
if self._loaded and not self._global_done and self._global_done is not None:
if isinstance(vector_action, self.SINGLE_BRAIN_ACTION_TYPES):
if self._num_external_brains == 1:
vector_action = {self._external_brain_names[0]: vector_action}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names a keys, "
"and vector_actions as values".format(self._num_brains))
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a vector_action input")
if isinstance(memory, self.SINGLE_BRAIN_ACTION_TYPES):
if self._num_external_brains == 1:
memory = {self._external_brain_names[0]: memory}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and memories as values".format(self._num_brains))
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a memory input")
if isinstance(text_action, self.SINGLE_BRAIN_TEXT_TYPES):
if self._num_external_brains == 1:
text_action = {self._external_brain_names[0]: text_action}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and text_actions as values".format(self._num_brains))
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a value input")
if isinstance(value, self.SINGLE_BRAIN_ACTION_TYPES):
if self._num_external_brains == 1:
value = {self._external_brain_names[0]: value}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and state/action value estimates as values".format(self._num_brains))
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a value input")
if isinstance(custom_action, CustomAction):
if self._num_external_brains == 1:
custom_action = {self._external_brain_names[0]: custom_action}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and CustomAction instances as values".format(self._num_brains))
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a custom_action input")
for brain_name in list(vector_action.keys()) + list(memory.keys()) + list(
text_action.keys()):
if brain_name not in self._external_brain_names:
raise UnityActionException(
"The name {0} does not correspond to an external brain "
"in the environment".format(brain_name))
for brain_name in self._external_brain_names:
n_agent = self._n_agents[brain_name]
if brain_name not in vector_action:
if self._brains[brain_name].vector_action_space_type == "discrete":
vector_action[brain_name] = [0.0] * n_agent * len(
self._brains[brain_name].vector_action_space_size)
else:
vector_action[brain_name] = [0.0] * n_agent * \
self._brains[
brain_name].vector_action_space_size[0]
else:
vector_action[brain_name] = self._flatten(vector_action[brain_name])
if brain_name not in memory:
memory[brain_name] = []
else:
if memory[brain_name] is None:
memory[brain_name] = []
else:
memory[brain_name] = self._flatten(memory[brain_name])
if brain_name not in text_action:
text_action[brain_name] = [""] * n_agent
else:
if text_action[brain_name] is None:
text_action[brain_name] = [""] * n_agent
if isinstance(text_action[brain_name], str):
text_action[brain_name] = [text_action[brain_name]] * n_agent
if brain_name not in custom_action:
custom_action[brain_name] = [None] * n_agent
else:
if custom_action[brain_name] is None:
custom_action[brain_name] = [None] * n_agent
if isinstance(custom_action[brain_name], CustomAction):
custom_action[brain_name] = [custom_action[brain_name]] * n_agent
number_text_actions = len(text_action[brain_name])
if not ((number_text_actions == n_agent) or number_text_actions == 0):
raise UnityActionException(
"There was a mismatch between the provided text_action and "
"the environment's expectation: "
"The brain {0} expected {1} text_action but was given {2}".format(
brain_name, n_agent, number_text_actions))
discrete_check = self._brains[brain_name].vector_action_space_type == "discrete"
expected_discrete_size = n_agent * len(
self._brains[brain_name].vector_action_space_size)
continuous_check = self._brains[brain_name].vector_action_space_type == "continuous"
expected_continuous_size = self._brains[brain_name].vector_action_space_size[
0] * n_agent
if not ((discrete_check and len(
vector_action[brain_name]) == expected_discrete_size) or
(continuous_check and len(
vector_action[brain_name]) == expected_continuous_size)):
raise UnityActionException(
"There was a mismatch between the provided action and "
"the environment's expectation: "
"The brain {0} expected {1} {2} action(s), but was provided: {3}"
.format(brain_name, str(expected_discrete_size)
if discrete_check
else str(expected_continuous_size),
self._brains[brain_name].vector_action_space_type,
str(vector_action[brain_name])))
outputs = self.communicator.exchange(
self._generate_step_input(vector_action, memory, text_action, value, custom_action))
if outputs is None:
raise KeyboardInterrupt
rl_output = outputs.rl_output
state = self._get_state(rl_output)
self._global_done = state[1]
for _b in self._external_brain_names:
self._n_agents[_b] = len(state[0][_b].agents)
return state[0]
elif not self._loaded:
raise UnityEnvironmentException("No Unity environment is loaded.")
elif self._global_done:
raise UnityActionException(
"The episode is completed. Reset the environment with 'reset()'")
    elif self._global_done is None:
raise UnityActionException(
"You cannot conduct step without first calling reset. "
"Reset the environment with 'reset()'")
def _flatten(cls, arr) -> List[float]:
"""
Converts arrays to list.
:param arr: numpy vector.
:return: flattened list.
"""
if isinstance(arr, cls.SCALAR_ACTION_TYPES):
arr = [float(arr)]
if isinstance(arr, np.ndarray):
arr = arr.tolist()
if len(arr) == 0:
return arr
if isinstance(arr[0], np.ndarray):
arr = [item for sublist in arr for item in sublist.tolist()]
if isinstance(arr[0], list):
arr = [item for sublist in arr for item in sublist]
arr = [float(x) for x in arr]
return arr
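
# A standalone numpy demo of the flattening behavior of _flatten above; the inputs
# are illustrative, and SCALAR_ACTION_TYPES is assumed to cover plain Python scalars.
import numpy as np

# Scalars become singleton float lists, and nested arrays/lists are flattened:
#   3                          -> [3.0]
#   np.array([[1, 2], [3, 4]]) -> [1.0, 2.0, 3.0, 4.0]
nested = [np.array([1, 2]), np.array([3, 4])]
flat = [float(item) for sublist in nested for item in sublist.tolist()]
assert flat == [1.0, 2.0, 3.0, 4.0]
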
def _get_state(self, output: UnityRLOutput) -> (AllBrainInfo, bool):
"""
Collects experience information from all external brains in environment at current step.
    :return: a dictionary of BrainInfo objects and the global done flag.
"""
_data = {}
global_done = output.global_done
for brain_name in output.agentInfos:
agent_info_list = output.agentInfos[brain_name].value
_data[brain_name] = BrainInfo.from_agent_proto(agent_info_list,
self.brains[brain_name])
return _data, global_done
def end_experience_collection_timer(self):
"""
Inform Metrics class that experience collection is done.
"""
if self.time_start_experience_collection:
curr_delta = time() - self.time_start_experience_collection
if self.delta_last_experience_collection is None:
self.delta_last_experience_collection = curr_delta
else:
self.delta_last_experience_collection += curr_delta
self.time_start_experience_collection = None
def add_delta_step(self, delta: float):
"""
Inform Metrics class about time to step in environment.
"""
if self.delta_last_experience_collection:
self.delta_last_experience_collection += delta
else:
self.delta_last_experience_collection = delta
def start_policy_update_timer(self, number_experiences: int, mean_return: float):
"""
Inform Metrics class that policy update has started.
    :param number_experiences: Number of experiences in Buffer at this point.
    :param mean_return: Return averaged across all cumulative returns since last policy update.
"""
self.last_buffer_length = number_experiences
self.last_mean_return = mean_return
self.time_policy_update_start = time()
def end_policy_update(self):
"""
    Inform Metrics class that policy update has ended.
"""
if self.time_policy_update_start:
self.delta_policy_update = time() - self.time_policy_update_start
else:
self.delta_policy_update = 0
delta_train_start = time() - self.time_training_start
LOGGER.debug(" Policy Update Training Metrics for {}: "
"\n\t\tTime to update Policy: {:0.3f} s \n"
"\t\tTime elapsed since training: {:0.3f} s \n"
"\t\tTime for experience collection: {:0.3f} s \n"
"\t\tBuffer Length: {} \n"
"\t\tReturns : {:0.3f}\n"
.format(self.brain_name, self.delta_policy_update,
delta_train_start, self.delta_last_experience_collection,
self.last_buffer_length, self.last_mean_return))
self._add_row(delta_train_start)
def write_training_metrics(self):
"""
Write Training Metrics to CSV
"""
with open(self.path, 'w') as file:
writer = csv.writer(file)
writer.writerow(FIELD_NAMES)
for row in self.rows:
writer.writerow(row)
def create_reward_encoder():
"""Creates TF ops to track and increment recent average cumulative reward."""
last_reward = tf.Variable(0, name="last_reward", trainable=False, dtype=tf.float32)
new_reward = tf.placeholder(shape=[], dtype=tf.float32, name='new_reward')
update_reward = tf.assign(last_reward, new_reward)
return last_reward, new_reward, update_reward
def create_curiosity_encoders(self):
"""
Creates state encoders for current and future observations.
Used for implementation of Curiosity-driven Exploration by Self-supervised Prediction
See https://arxiv.org/abs/1705.05363 for more details.
:return: current and future state encoder tensors.
"""
encoded_state_list = []
encoded_next_state_list = []
if self.vis_obs_size > 0:
self.next_visual_in = []
visual_encoders = []
next_visual_encoders = []
for i in range(self.vis_obs_size):
# Create input ops for next (t+1) visual observations.
next_visual_input = self.create_visual_input(self.brain.camera_resolutions[i],
name="next_visual_observation_" + str(i))
self.next_visual_in.append(next_visual_input)
            # Create the encoder ops for current and next visual input. Note that these encoders are siamese.
encoded_visual = self.create_visual_observation_encoder(self.visual_in[i], self.curiosity_enc_size,
self.swish, 1, "stream_{}_visual_obs_encoder"
.format(i), False)
encoded_next_visual = self.create_visual_observation_encoder(self.next_visual_in[i],
self.curiosity_enc_size,
self.swish, 1,
"stream_{}_visual_obs_encoder".format(i),
True)
visual_encoders.append(encoded_visual)
next_visual_encoders.append(encoded_next_visual)
hidden_visual = tf.concat(visual_encoders, axis=1)
hidden_next_visual = tf.concat(next_visual_encoders, axis=1)
encoded_state_list.append(hidden_visual)
encoded_next_state_list.append(hidden_next_visual)
if self.vec_obs_size > 0:
        # Create the encoder ops for current and next vector input. Note that these encoders are siamese.
# Create input op for next (t+1) vector observation.
self.next_vector_in = tf.placeholder(shape=[None, self.vec_obs_size], dtype=tf.float32,
name='next_vector_observation')
encoded_vector_obs = self.create_vector_observation_encoder(self.vector_in,
self.curiosity_enc_size,
self.swish, 2, "vector_obs_encoder",
False)
encoded_next_vector_obs = self.create_vector_observation_encoder(self.next_vector_in,
self.curiosity_enc_size,
self.swish, 2,
"vector_obs_encoder",
True)
encoded_state_list.append(encoded_vector_obs)
encoded_next_state_list.append(encoded_next_vector_obs)
encoded_state = tf.concat(encoded_state_list, axis=1)
encoded_next_state = tf.concat(encoded_next_state_list, axis=1)
return encoded_state, encoded_next_state
def create_inverse_model(self, encoded_state, encoded_next_state):
"""
Creates inverse model TensorFlow ops for Curiosity module.
Predicts action taken given current and future encoded states.
:param encoded_state: Tensor corresponding to encoded current state.
:param encoded_next_state: Tensor corresponding to encoded next state.
"""
combined_input = tf.concat([encoded_state, encoded_next_state], axis=1)
hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
if self.brain.vector_action_space_type == "continuous":
pred_action = tf.layers.dense(hidden, self.act_size[0], activation=None)
squared_difference = tf.reduce_sum(tf.squared_difference(pred_action, self.selected_actions), axis=1)
self.inverse_loss = tf.reduce_mean(tf.dynamic_partition(squared_difference, self.mask, 2)[1])
else:
pred_action = tf.concat(
[tf.layers.dense(hidden, self.act_size[i], activation=tf.nn.softmax)
for i in range(len(self.act_size))], axis=1)
cross_entropy = tf.reduce_sum(-tf.log(pred_action + 1e-10) * self.selected_actions, axis=1)
self.inverse_loss = tf.reduce_mean(tf.dynamic_partition(cross_entropy, self.mask, 2)[1])
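
# A numpy sketch of the discrete inverse-model loss above: cross-entropy between the
# predicted action probabilities and the one-hot selected actions. The
# tf.dynamic_partition masking is omitted, so this averages over all samples.
import numpy as np

def inverse_loss_sketch(pred_action, selected_actions):
    cross_entropy = np.sum(-np.log(pred_action + 1e-10) * selected_actions, axis=1)
    return np.mean(cross_entropy)

probs = np.array([[0.7, 0.3], [0.2, 0.8]])
one_hot = np.array([[1.0, 0.0], [0.0, 1.0]])
loss = inverse_loss_sketch(probs, one_hot)  # mean of -log(0.7) and -log(0.8)
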
def create_forward_model(self, encoded_state, encoded_next_state):
"""
Creates forward model TensorFlow ops for Curiosity module.
Predicts encoded future state based on encoded current state and given action.
:param encoded_state: Tensor corresponding to encoded current state.
:param encoded_next_state: Tensor corresponding to encoded next state.
"""
combined_input = tf.concat([encoded_state, self.selected_actions], axis=1)
hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
# We compare against the concatenation of all observation streams, hence `self.vis_obs_size + int(self.vec_obs_size > 0)`.
pred_next_state = tf.layers.dense(hidden, self.curiosity_enc_size * (self.vis_obs_size + int(self.vec_obs_size > 0)),
activation=None)
squared_difference = 0.5 * tf.reduce_sum(tf.squared_difference(pred_next_state, encoded_next_state), axis=1)
self.intrinsic_reward = tf.clip_by_value(self.curiosity_strength * squared_difference, 0, 1)
self.forward_loss = tf.reduce_mean(tf.dynamic_partition(squared_difference, self.mask, 2)[1])
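
# A numpy sketch of the intrinsic-reward arithmetic above (again without the
# tf.dynamic_partition masking); the array shapes and values are illustrative.
import numpy as np

def intrinsic_reward_sketch(pred_next_state, encoded_next_state, curiosity_strength):
    squared_difference = 0.5 * np.sum(np.square(pred_next_state - encoded_next_state), axis=1)
    return np.clip(curiosity_strength * squared_difference, 0.0, 1.0)

rewards = intrinsic_reward_sketch(np.zeros((2, 4)), np.ones((2, 4)), 0.1)
# Each agent gets 0.1 * 0.5 * 4 = 0.2, well inside the [0, 1] clip range.
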
def create_ppo_optimizer(self, probs, old_probs, value, entropy, beta, epsilon, lr, max_step):
"""
Creates training-specific Tensorflow ops for PPO models.
:param probs: Current policy probabilities
:param old_probs: Past policy probabilities
:param value: Current value estimate
:param beta: Entropy regularization strength
:param entropy: Current policy entropy
:param epsilon: Value for policy-divergence threshold
:param lr: Learning rate
:param max_step: Total number of training steps.
"""
self.returns_holder = tf.placeholder(shape=[None], dtype=tf.float32, name='discounted_rewards')
self.advantage = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='advantages')
self.learning_rate = tf.train.polynomial_decay(lr, self.global_step, max_step, 1e-10, power=1.0)
self.old_value = tf.placeholder(shape=[None], dtype=tf.float32, name='old_value_estimates')
decay_epsilon = tf.train.polynomial_decay(epsilon, self.global_step, max_step, 0.1, power=1.0)
decay_beta = tf.train.polynomial_decay(beta, self.global_step, max_step, 1e-5, power=1.0)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
clipped_value_estimate = self.old_value + tf.clip_by_value(tf.reduce_sum(value, axis=1) - self.old_value,
- decay_epsilon, decay_epsilon)
v_opt_a = tf.squared_difference(self.returns_holder, tf.reduce_sum(value, axis=1))
v_opt_b = tf.squared_difference(self.returns_holder, clipped_value_estimate)
self.value_loss = tf.reduce_mean(tf.dynamic_partition(tf.maximum(v_opt_a, v_opt_b), self.mask, 2)[1])
# Here we calculate PPO policy loss. In continuous control this is done independently for each action gaussian
# and then averaged together. This provides significantly better performance than treating the probability
# as an average of probabilities, or as a joint probability.
r_theta = tf.exp(probs - old_probs)
p_opt_a = r_theta * self.advantage
p_opt_b = tf.clip_by_value(r_theta, 1.0 - decay_epsilon, 1.0 + decay_epsilon) * self.advantage
self.policy_loss = -tf.reduce_mean(tf.dynamic_partition(tf.minimum(p_opt_a, p_opt_b), self.mask, 2)[1])
self.loss = self.policy_loss + 0.5 * self.value_loss - decay_beta * tf.reduce_mean(
tf.dynamic_partition(entropy, self.mask, 2)[1])
if self.use_curiosity:
self.loss += 10 * (0.2 * self.forward_loss + 0.8 * self.inverse_loss)
self.update_batch = optimizer.minimize(self.loss)
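
# A numpy sketch of the clipped surrogate objective built above, using per-sample
# log probabilities and advantages. The value loss, entropy bonus, masking, and
# decay schedules are omitted for brevity.
import numpy as np

def ppo_policy_loss_sketch(log_probs, old_log_probs, advantages, epsilon=0.2):
    r_theta = np.exp(log_probs - old_log_probs)
    p_opt_a = r_theta * advantages
    p_opt_b = np.clip(r_theta, 1.0 - epsilon, 1.0 + epsilon) * advantages
    return -np.mean(np.minimum(p_opt_a, p_opt_b))
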
def evaluate(self, brain_info):
"""
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
"""
feed_dict = {self.model.batch_size: len(brain_info.vector_observations),
self.model.sequence_length: 1}
epsilon = None
if self.use_recurrent:
if not self.use_continuous_act:
feed_dict[self.model.prev_action] = brain_info.previous_vector_actions.reshape(
[-1, len(self.model.act_size)])
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = brain_info.memories
if self.use_continuous_act:
epsilon = np.random.normal(
size=(len(brain_info.vector_observations), self.model.act_size[0]))
feed_dict[self.model.epsilon] = epsilon
feed_dict = self._fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out['random_normal_epsilon'] = epsilon
return run_out
def update(self, mini_batch, num_sequences):
"""
Updates model using buffer.
:param num_sequences: Number of trajectories in batch.
:param mini_batch: Experience batch.
:return: Output from update process.
"""
feed_dict = {self.model.batch_size: num_sequences,
self.model.sequence_length: self.sequence_length,
self.model.mask_input: mini_batch['masks'].flatten(),
self.model.returns_holder: mini_batch['discounted_returns'].flatten(),
self.model.old_value: mini_batch['value_estimates'].flatten(),
self.model.advantage: mini_batch['advantages'].reshape([-1, 1]),
self.model.all_old_log_probs: mini_batch['action_probs'].reshape(
[-1, sum(self.model.act_size)])}
if self.use_continuous_act:
feed_dict[self.model.output_pre] = mini_batch['actions_pre'].reshape(
[-1, self.model.act_size[0]])
feed_dict[self.model.epsilon] = mini_batch['random_normal_epsilon'].reshape(
[-1, self.model.act_size[0]])
else:
feed_dict[self.model.action_holder] = mini_batch['actions'].reshape(
[-1, len(self.model.act_size)])
if self.use_recurrent:
feed_dict[self.model.prev_action] = mini_batch['prev_action'].reshape(
[-1, len(self.model.act_size)])
feed_dict[self.model.action_masks] = mini_batch['action_mask'].reshape(
[-1, sum(self.brain.vector_action_space_size)])
if self.use_vec_obs:
feed_dict[self.model.vector_in] = mini_batch['vector_obs'].reshape(
[-1, self.vec_obs_size])
if self.use_curiosity:
feed_dict[self.model.next_vector_in] = mini_batch['next_vector_in'].reshape(
[-1, self.vec_obs_size])
if self.model.vis_obs_size > 0:
for i, _ in enumerate(self.model.visual_in):
_obs = mini_batch['visual_obs%d' % i]
if self.sequence_length > 1 and self.use_recurrent:
(_batch, _seq, _w, _h, _c) = _obs.shape
feed_dict[self.model.visual_in[i]] = _obs.reshape([-1, _w, _h, _c])
else:
feed_dict[self.model.visual_in[i]] = _obs
if self.use_curiosity:
for i, _ in enumerate(self.model.visual_in):
_obs = mini_batch['next_visual_obs%d' % i]
if self.sequence_length > 1 and self.use_recurrent:
(_batch, _seq, _w, _h, _c) = _obs.shape
feed_dict[self.model.next_visual_in[i]] = _obs.reshape([-1, _w, _h, _c])
else:
feed_dict[self.model.next_visual_in[i]] = _obs
if self.use_recurrent:
mem_in = mini_batch['memory'][:, 0, :]
feed_dict[self.model.memory_in] = mem_in
self.has_updated = True
run_out = self._execute_model(feed_dict, self.update_dict)
return run_out
def get_intrinsic_rewards(self, curr_info, next_info):
"""
Generates intrinsic reward used for Curiosity-based training.
    :param curr_info: Current BrainInfo.
    :param next_info: Next BrainInfo.
:return: Intrinsic rewards for all agents.
"""
if self.use_curiosity:
if len(curr_info.agents) == 0:
return []
feed_dict = {self.model.batch_size: len(next_info.vector_observations),
self.model.sequence_length: 1}
if self.use_continuous_act:
feed_dict[self.model.selected_actions] = next_info.previous_vector_actions
else:
feed_dict[self.model.action_holder] = next_info.previous_vector_actions
for i in range(self.model.vis_obs_size):
feed_dict[self.model.visual_in[i]] = curr_info.visual_observations[i]
feed_dict[self.model.next_visual_in[i]] = next_info.visual_observations[i]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = curr_info.vector_observations
feed_dict[self.model.next_vector_in] = next_info.vector_observations
if self.use_recurrent:
if curr_info.memories.shape[1] == 0:
curr_info.memories = self.make_empty_memory(len(curr_info.agents))
feed_dict[self.model.memory_in] = curr_info.memories
intrinsic_rewards = self.sess.run(self.model.intrinsic_reward,
feed_dict=feed_dict) * float(self.has_updated)
return intrinsic_rewards
else:
return None
def get_value_estimate(self, brain_info, idx):
"""
Generates value estimates for bootstrapping.
:param brain_info: BrainInfo to be used for bootstrapping.
:param idx: Index in BrainInfo of agent.
:return: Value estimate.
"""
feed_dict = {self.model.batch_size: 1, self.model.sequence_length: 1}
for i in range(len(brain_info.visual_observations)):
feed_dict[self.model.visual_in[i]] = [brain_info.visual_observations[i][idx]]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = [brain_info.vector_observations[idx]]
if self.use_recurrent:
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = [brain_info.memories[idx]]
if not self.use_continuous_act and self.use_recurrent:
feed_dict[self.model.prev_action] = brain_info.previous_vector_actions[idx].reshape(
[-1, len(self.model.act_size)])
value_estimate = self.sess.run(self.model.value, feed_dict)
return value_estimate
def update_reward(self, new_reward):
"""
Updates reward value for policy.
:param new_reward: New reward to save.
"""
self.sess.run(self.model.update_reward,
feed_dict={self.model.new_reward: new_reward})
def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo,
take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
"""
# Used to collect information about student performance.
info_student = curr_info[self.brain_name]
next_info_student = next_info[self.brain_name]
for agent_id in info_student.agents:
self.evaluation_buffer[agent_id].last_brain_info = info_student
for agent_id in next_info_student.agents:
stored_info_student = self.evaluation_buffer[agent_id].last_brain_info
if stored_info_student is None:
continue
else:
next_idx = next_info_student.agents.index(agent_id)
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info_student.rewards[next_idx]
if not next_info_student.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1
def process_experiences(self, current_info: AllBrainInfo, next_info: AllBrainInfo):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo
"""
info_student = next_info[self.brain_name]
for l in range(len(info_student.agents)):
if info_student.local_done[l]:
agent_id = info_student.agents[l]
self.stats['Environment/Cumulative Reward'].append(
self.cumulative_rewards.get(agent_id, 0))
self.stats['Environment/Episode Length'].append(
self.episode_steps.get(agent_id, 0))
self.cumulative_rewards[agent_id] = 0
self.episode_steps[agent_id] = 0
def end_episode(self):
"""
    A signal that the episode has ended. The buffer must be reset.
    Gets called only when the academy resets.
"""
self.evaluation_buffer.reset_local_buffers()
for agent_id in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
for agent_id in self.episode_steps:
self.episode_steps[agent_id] = 0
def update_policy(self):
"""
Updates the policy.
"""
self.demonstration_buffer.update_buffer.shuffle()
batch_losses = []
num_batches = min(len(self.demonstration_buffer.update_buffer['actions']) //
self.n_sequences, self.batches_per_epoch)
for i in range(num_batches):
update_buffer = self.demonstration_buffer.update_buffer
start = i * self.n_sequences
end = (i + 1) * self.n_sequences
mini_batch = update_buffer.make_mini_batch(start, end)
run_out = self.policy.update(mini_batch, self.n_sequences)
loss = run_out['policy_loss']
batch_losses.append(loss)
if len(batch_losses) > 0:
self.stats['Losses/Cloning Loss'].append(np.mean(batch_losses))
else:
self.stats['Losses/Cloning Loss'].append(0)
def create_global_steps():
"""Creates TF ops to track and increment global training step."""
global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int32)
increment_step = tf.assign(global_step, tf.add(global_step, 1))
return global_step, increment_step
def create_visual_input(camera_parameters, name):
"""
Creates image input op.
:param camera_parameters: Parameters for visual observation from BrainInfo.
:param name: Desired name of input op.
:return: input op.
"""
o_size_h = camera_parameters['height']
o_size_w = camera_parameters['width']
bw = camera_parameters['blackAndWhite']
if bw:
c_channels = 1
else:
c_channels = 3
visual_in = tf.placeholder(shape=[None, o_size_h, o_size_w, c_channels], dtype=tf.float32,
name=name)
return visual_in
def create_vector_input(self, name='vector_observation'):
"""
Creates ops for vector observation input.
:param name: Name of the placeholder op.
    :return: The vector observation placeholder, normalized when self.normalize is set.
"""
self.vector_in = tf.placeholder(shape=[None, self.vec_obs_size], dtype=tf.float32,
name=name)
if self.normalize:
self.running_mean = tf.get_variable("running_mean", [self.vec_obs_size],
trainable=False, dtype=tf.float32,
initializer=tf.zeros_initializer())
self.running_variance = tf.get_variable("running_variance", [self.vec_obs_size],
trainable=False,
dtype=tf.float32,
initializer=tf.ones_initializer())
self.update_mean, self.update_variance = self.create_normalizer_update(self.vector_in)
self.normalized_state = tf.clip_by_value((self.vector_in - self.running_mean) / tf.sqrt(
self.running_variance / (tf.cast(self.global_step, tf.float32) + 1)), -5, 5,
name="normalized_state")
return self.normalized_state
else:
return self.vector_in
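
# A numpy sketch of the normalization applied above: center by the running mean,
# scale by the running-variance estimate, and clip to [-5, 5]. `steps` stands in
# for the global step counter; the names here are illustrative.
import numpy as np

def normalize_sketch(obs, running_mean, running_variance, steps):
    return np.clip((obs - running_mean) / np.sqrt(running_variance / (steps + 1)), -5, 5)
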
def create_vector_observation_encoder(observation_input, h_size, activation, num_layers, scope,
reuse):
"""
Builds a set of hidden state encoders.
:param reuse: Whether to re-use the weights within the same scope.
:param scope: Graph scope for the encoder ops.
:param observation_input: Input vector.
:param h_size: Hidden layer size.
:param activation: What type of activation function to use for layers.
:param num_layers: number of hidden layers to create.
:return: List of hidden layer tensors.
"""
with tf.variable_scope(scope):
hidden = observation_input
for i in range(num_layers):
hidden = tf.layers.dense(hidden, h_size, activation=activation, reuse=reuse,
name="hidden_{}".format(i),
kernel_initializer=c_layers.variance_scaling_initializer(
1.0))
return hidden
def create_visual_observation_encoder(self, image_input, h_size, activation, num_layers, scope,
reuse):
"""
Builds a set of visual (CNN) encoders.
:param reuse: Whether to re-use the weights within the same scope.
:param scope: The scope of the graph within which to create the ops.
:param image_input: The placeholder for the image input to use.
:param h_size: Hidden layer size.
:param activation: What type of activation function to use for layers.
:param num_layers: number of hidden layers to create.
:return: List of hidden layer tensors.
"""
with tf.variable_scope(scope):
conv1 = tf.layers.conv2d(image_input, 16, kernel_size=[8, 8], strides=[4, 4],
activation=tf.nn.elu, reuse=reuse, name="conv_1")
conv2 = tf.layers.conv2d(conv1, 32, kernel_size=[4, 4], strides=[2, 2],
activation=tf.nn.elu, reuse=reuse, name="conv_2")
hidden = c_layers.flatten(conv2)
with tf.variable_scope(scope + '/' + 'flat_encoding'):
hidden_flat = self.create_vector_observation_encoder(hidden, h_size, activation,
num_layers, scope, reuse)
return hidden_flat
def create_discrete_action_masking_layer(all_logits, action_masks, action_size):
"""
Creates a masking layer for the discrete actions
:param all_logits: The concatenated unnormalized action probabilities for all branches
    :param action_masks: The mask for the logits. Must be of dimension [None x total_number_of_actions]
:param action_size: A list containing the number of possible actions for each branch
    :return: The sampled action output of dimension [batch_size, num_branches] and the concatenated normalized log probabilities
"""
action_idx = [0] + list(np.cumsum(action_size))
branches_logits = [all_logits[:, action_idx[i]:action_idx[i + 1]] for i in range(len(action_size))]
branch_masks = [action_masks[:, action_idx[i]:action_idx[i + 1]] for i in range(len(action_size))]
raw_probs = [tf.multiply(tf.nn.softmax(branches_logits[k]) + 1.0e-10, branch_masks[k])
for k in range(len(action_size))]
normalized_probs = [
tf.divide(raw_probs[k], tf.reduce_sum(raw_probs[k], axis=1, keepdims=True))
for k in range(len(action_size))]
output = tf.concat([tf.multinomial(tf.log(normalized_probs[k]), 1) for k in range(len(action_size))], axis=1)
return output, tf.concat([tf.log(normalized_probs[k] + 1.0e-10) for k in range(len(action_size))], axis=1)
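
# A numpy sketch of the masking math above for a single action branch: softmax the
# logits, zero out disallowed actions, and renormalize. Values are illustrative.
import numpy as np

def masked_probs_sketch(branch_logits, branch_mask):
    probs = np.exp(branch_logits - branch_logits.max(axis=1, keepdims=True))
    probs /= probs.sum(axis=1, keepdims=True)
    raw_probs = (probs + 1.0e-10) * branch_mask
    return raw_probs / raw_probs.sum(axis=1, keepdims=True)

p = masked_probs_sketch(np.array([[1.0, 1.0, 1.0]]), np.array([[1.0, 1.0, 0.0]]))
# -> approximately [[0.5, 0.5, 0.0]]: the masked action's mass is redistributed.
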
def create_observation_streams(self, num_streams, h_size, num_layers):
"""
Creates encoding stream for observations.
:param num_streams: Number of streams to create.
:param h_size: Size of hidden linear layers in stream.
:param num_layers: Number of hidden linear layers in stream.
:return: List of encoded streams.
"""
brain = self.brain
activation_fn = self.swish
self.visual_in = []
for i in range(brain.number_visual_observations):
visual_input = self.create_visual_input(brain.camera_resolutions[i],
name="visual_observation_" + str(i))
self.visual_in.append(visual_input)
vector_observation_input = self.create_vector_input()
final_hiddens = []
for i in range(num_streams):
visual_encoders = []
hidden_state, hidden_visual = None, None
if self.vis_obs_size > 0:
for j in range(brain.number_visual_observations):
encoded_visual = self.create_visual_observation_encoder(self.visual_in[j],
h_size,
activation_fn,
num_layers,
"main_graph_{}_encoder{}"
.format(i, j), False)
visual_encoders.append(encoded_visual)
hidden_visual = tf.concat(visual_encoders, axis=1)
if brain.vector_observation_space_size > 0:
hidden_state = self.create_vector_observation_encoder(vector_observation_input,
h_size, activation_fn,
num_layers,
"main_graph_{}".format(i),
False)
if hidden_state is not None and hidden_visual is not None:
final_hidden = tf.concat([hidden_visual, hidden_state], axis=1)
elif hidden_state is None and hidden_visual is not None:
final_hidden = hidden_visual
elif hidden_state is not None and hidden_visual is None:
final_hidden = hidden_state
else:
raise Exception("No valid network configuration possible. "
"There are no states or observations in this brain")
final_hiddens.append(final_hidden)
return final_hiddens
def create_recurrent_encoder(input_state, memory_in, sequence_length, name='lstm'):
"""
Builds a recurrent encoder for either state or observations (LSTM).
:param sequence_length: Length of sequence to unroll.
:param input_state: The input tensor to the LSTM cell.
:param memory_in: The input memory to the LSTM cell.
:param name: The scope of the LSTM cell.
"""
s_size = input_state.get_shape().as_list()[1]
m_size = memory_in.get_shape().as_list()[1]
lstm_input_state = tf.reshape(input_state, shape=[-1, sequence_length, s_size])
memory_in = tf.reshape(memory_in[:, :], [-1, m_size])
_half_point = int(m_size / 2)
with tf.variable_scope(name):
rnn_cell = tf.contrib.rnn.BasicLSTMCell(_half_point)
lstm_vector_in = tf.contrib.rnn.LSTMStateTuple(memory_in[:, :_half_point],
memory_in[:, _half_point:])
recurrent_output, lstm_state_out = tf.nn.dynamic_rnn(rnn_cell, lstm_input_state,
initial_state=lstm_vector_in)
recurrent_output = tf.reshape(recurrent_output, shape=[-1, _half_point])
return recurrent_output, tf.concat([lstm_state_out.c, lstm_state_out.h], axis=1)
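
# A small sketch of the memory layout assumed above: the flat memory vector carries
# the LSTM cell state in its first half and the hidden state in its second half,
# and the two halves are concatenated again on the way out.
m_size = 8
memory = list(range(m_size))
_half_point = m_size // 2
cell_state, hidden_state = memory[:_half_point], memory[_half_point:]
memory_out = cell_state + hidden_state  # mirrors tf.concat([c, h], axis=1)
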
def create_cc_actor_critic(self, h_size, num_layers):
"""
Creates Continuous control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
"""
hidden_streams = self.create_observation_streams(2, h_size, num_layers)
if self.use_recurrent:
self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32,
name='recurrent_in')
_half_point = int(self.m_size / 2)
hidden_policy, memory_policy_out = self.create_recurrent_encoder(
hidden_streams[0], self.memory_in[:, :_half_point], self.sequence_length,
name='lstm_policy')
hidden_value, memory_value_out = self.create_recurrent_encoder(
hidden_streams[1], self.memory_in[:, _half_point:], self.sequence_length,
name='lstm_value')
self.memory_out = tf.concat([memory_policy_out, memory_value_out], axis=1,
name='recurrent_out')
else:
hidden_policy = hidden_streams[0]
hidden_value = hidden_streams[1]
mu = tf.layers.dense(hidden_policy, self.act_size[0], activation=None,
kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01))
log_sigma_sq = tf.get_variable("log_sigma_squared", [self.act_size[0]], dtype=tf.float32,
initializer=tf.zeros_initializer())
sigma_sq = tf.exp(log_sigma_sq)
self.epsilon = tf.placeholder(shape=[None, self.act_size[0]], dtype=tf.float32, name='epsilon')
# Clip and scale output to ensure actions are always within [-1, 1] range.
self.output_pre = mu + tf.sqrt(sigma_sq) * self.epsilon
output_post = tf.clip_by_value(self.output_pre, -3, 3) / 3
self.output = tf.identity(output_post, name='action')
self.selected_actions = tf.stop_gradient(output_post)
# Compute probability of model output.
all_probs = - 0.5 * tf.square(tf.stop_gradient(self.output_pre) - mu) / sigma_sq \
- 0.5 * tf.log(2.0 * np.pi) - 0.5 * log_sigma_sq
self.all_log_probs = tf.identity(all_probs, name='action_probs')
self.entropy = 0.5 * tf.reduce_mean(tf.log(2 * np.pi * np.e) + log_sigma_sq)
value = tf.layers.dense(hidden_value, 1, activation=None)
self.value = tf.identity(value, name="value_estimate")
self.all_old_log_probs = tf.placeholder(shape=[None, self.act_size[0]], dtype=tf.float32,
name='old_probabilities')
    # We keep the same names for these tensors, but use new nodes to keep code parallelism with discrete control.
self.log_probs = tf.reduce_sum((tf.identity(self.all_log_probs)), axis=1, keepdims=True)
self.old_log_probs = tf.reduce_sum((tf.identity(self.all_old_log_probs)), axis=1,
keepdims=True)
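
# A numpy sketch of the per-dimension Gaussian log-probability computed above, with
# mu, log_sigma_sq, and actions as toy arrays; illustrative only.
import numpy as np

def gaussian_log_probs_sketch(actions, mu, log_sigma_sq):
    sigma_sq = np.exp(log_sigma_sq)
    return (-0.5 * np.square(actions - mu) / sigma_sq
            - 0.5 * np.log(2.0 * np.pi) - 0.5 * log_sigma_sq)
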
def create_dc_actor_critic(self, h_size, num_layers):
"""
Creates Discrete control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
"""
hidden_streams = self.create_observation_streams(1, h_size, num_layers)
hidden = hidden_streams[0]
if self.use_recurrent:
self.prev_action = tf.placeholder(shape=[None, len(self.act_size)], dtype=tf.int32,
name='prev_action')
prev_action_oh = tf.concat([
tf.one_hot(self.prev_action[:, i], self.act_size[i]) for i in
range(len(self.act_size))], axis=1)
hidden = tf.concat([hidden, prev_action_oh], axis=1)
self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32,
name='recurrent_in')
hidden, memory_out = self.create_recurrent_encoder(hidden, self.memory_in,
self.sequence_length)
self.memory_out = tf.identity(memory_out, name='recurrent_out')
policy_branches = []
for size in self.act_size:
policy_branches.append(tf.layers.dense(hidden, size, activation=None, use_bias=False,
kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01)))
    self.all_log_probs = tf.concat(policy_branches, axis=1, name="action_probs")
self.action_masks = tf.placeholder(shape=[None, sum(self.act_size)], dtype=tf.float32, name="action_masks")
output, normalized_logits = self.create_discrete_action_masking_layer(
self.all_log_probs, self.action_masks, self.act_size)
self.output = tf.identity(output)
self.normalized_logits = tf.identity(normalized_logits, name='action')
value = tf.layers.dense(hidden, 1, activation=None)
self.value = tf.identity(value, name="value_estimate")
self.action_holder = tf.placeholder(
shape=[None, len(policy_branches)], dtype=tf.int32, name="action_holder")
self.action_oh = tf.concat([
tf.one_hot(self.action_holder[:, i], self.act_size[i]) for i in range(len(self.act_size))], axis=1)
self.selected_actions = tf.stop_gradient(self.action_oh)
self.all_old_log_probs = tf.placeholder(
shape=[None, sum(self.act_size)], dtype=tf.float32, name='old_probabilities')
_, old_normalized_logits = self.create_discrete_action_masking_layer(
self.all_old_log_probs, self.action_masks, self.act_size)
action_idx = [0] + list(np.cumsum(self.act_size))
self.entropy = tf.reduce_sum((tf.stack([
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.nn.softmax(self.all_log_probs[:, action_idx[i]:action_idx[i + 1]]),
logits=self.all_log_probs[:, action_idx[i]:action_idx[i + 1]])
for i in range(len(self.act_size))], axis=1)), axis=1)
self.log_probs = tf.reduce_sum((tf.stack([
-tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.action_oh[:, action_idx[i]:action_idx[i + 1]],
logits=normalized_logits[:, action_idx[i]:action_idx[i + 1]]
)
for i in range(len(self.act_size))], axis=1)), axis=1, keepdims=True)
self.old_log_probs = tf.reduce_sum((tf.stack([
-tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.action_oh[:, action_idx[i]:action_idx[i + 1]],
logits=old_normalized_logits[:, action_idx[i]:action_idx[i + 1]]
)
for i in range(len(self.act_size))], axis=1)), axis=1, keepdims=True)
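For intuition, the effect of action masking on a single branch can be sketched in NumPy; this is a sketch of the idea, not the exact create_discrete_action_masking_layer implementation:
import numpy as np
logits = np.array([1.0, 2.0, 0.5])
mask = np.array([1.0, 0.0, 1.0])            # 0 disallows the middle action
masked = np.where(mask > 0, logits, -1e10)  # send masked logits toward -inf
probs = np.exp(masked - masked.max())
probs /= probs.sum()
print(probs)                                # the masked action gets ~0 probability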
|
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo,
take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
"""
# Used to collect teacher experience into training buffer
info_teacher = curr_info[self.brain_to_imitate]
next_info_teacher = next_info[self.brain_to_imitate]
for agent_id in info_teacher.agents:
self.demonstration_buffer[agent_id].last_brain_info = info_teacher
for agent_id in next_info_teacher.agents:
stored_info_teacher = self.demonstration_buffer[agent_id].last_brain_info
if stored_info_teacher is None:
continue
else:
idx = stored_info_teacher.agents.index(agent_id)
next_idx = next_info_teacher.agents.index(agent_id)
if stored_info_teacher.text_observations[idx] != "":
info_teacher_record, info_teacher_reset = \
stored_info_teacher.text_observations[idx].lower().split(",")
next_info_teacher_record, next_info_teacher_reset = \
next_info_teacher.text_observations[idx]. \
lower().split(",")
if next_info_teacher_reset == "true":
self.demonstration_buffer.reset_update_buffer()
else:
info_teacher_record, next_info_teacher_record = "true", "true"
if info_teacher_record == "true" and next_info_teacher_record == "true":
if not stored_info_teacher.local_done[idx]:
for i in range(self.policy.vis_obs_size):
self.demonstration_buffer[agent_id]['visual_obs%d' % i] \
.append(stored_info_teacher.visual_observations[i][idx])
if self.policy.use_vec_obs:
self.demonstration_buffer[agent_id]['vector_obs'] \
.append(stored_info_teacher.vector_observations[idx])
if self.policy.use_recurrent:
if stored_info_teacher.memories.shape[1] == 0:
stored_info_teacher.memories = np.zeros(
(len(stored_info_teacher.agents),
self.policy.m_size))
self.demonstration_buffer[agent_id]['memory'].append(
stored_info_teacher.memories[idx])
self.demonstration_buffer[agent_id]['actions'].append(
next_info_teacher.previous_vector_actions[next_idx])
super(OnlineBCTrainer, self).add_experiences(curr_info, next_info, take_action_outputs)
|
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo
def process_experiences(self, current_info: AllBrainInfo, next_info: AllBrainInfo):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo
"""
info_teacher = next_info[self.brain_to_imitate]
for l in range(len(info_teacher.agents)):
        num_teacher_actions = len(self.demonstration_buffer[info_teacher.agents[l]]['actions'])
        horizon_reached = num_teacher_actions > self.trainer_parameters['time_horizon']
        teacher_filled = num_teacher_actions > 0
if (info_teacher.local_done[l] or horizon_reached) and teacher_filled:
agent_id = info_teacher.agents[l]
self.demonstration_buffer.append_update_buffer(
agent_id, batch_size=None, training_length=self.policy.sequence_length)
self.demonstration_buffer[agent_id].reset_agent()
super(OnlineBCTrainer, self).process_experiences(current_info, next_info)
|
Yield items from any nested iterable; see REF.
def flatten(items,enter=lambda x:isinstance(x, list)):
# http://stackoverflow.com/a/40857703
# https://github.com/ctmakro/canton/blob/master/canton/misc.py
"""Yield items from any nested iterable; see REF."""
for x in items:
if enter(x):
yield from flatten(x)
else:
yield x
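For example:
print(list(flatten([1, [2, [3, 4]], 5])))  # -> [1, 2, 3, 4, 5]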
|
A value in replace_with_strings can be either single string or list of strings
def replace_strings_in_list(array_of_strings, replace_with_strings):
    "A value in replace_with_strings can be either a single string or a list of strings"
    potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strings]
return list(flatten(potentially_nested_list))
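For example:
print(replace_strings_in_list(['a', 'b', 'c'], {'b': ['x', 'y']}))  # -> ['a', 'x', 'y', 'c']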
|
Preserves the order of elements in the list
def remove_duplicates_from_list(array):
"Preserves the order of elements in the list"
output = []
unique = set()
for a in array:
if a not in unique:
unique.add(a)
output.append(a)
return output
|
Convert from NHWC|NCHW => HW
def pool_to_HW(shape, data_frmt):
""" Convert from NHWC|NCHW => HW
"""
if len(shape) != 4:
return shape # Not NHWC|NCHW, return as is
if data_frmt == 'NCHW':
return [shape[2], shape[3]]
return [shape[1], shape[2]]
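For example:
print(pool_to_HW([1, 84, 84, 3], 'NHWC'))  # -> [84, 84]
print(pool_to_HW([1, 3, 84, 84], 'NCHW'))  # -> [84, 84]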
|
Converts a TensorFlow model into a Barracuda model.
:param source_file: The TensorFlow Model
:param target_file: The name of the file the converted model will be saved to
:param trim_unused_by_output: Regular expression matching the output nodes that should remain in the model. All other unconnected nodes will be removed.
:param verbose: If True, will display debug messages
:param compress_f16: If true, the float values will be converted to f16
:return:
def convert(source_file, target_file, trim_unused_by_output="", verbose=False, compress_f16=False):
"""
Converts a TensorFlow model into a Barracuda model.
:param source_file: The TensorFlow Model
:param target_file: The name of the file the converted model will be saved to
    :param trim_unused_by_output: Regular expression matching the output nodes that should remain in the model. All other unconnected nodes will be removed.
:param verbose: If True, will display debug messages
:param compress_f16: If true, the float values will be converted to f16
:return:
"""
    if isinstance(verbose, bool):
args = Struct()
args.verbose = verbose
args.print_layers = verbose
args.print_source_json = verbose
args.print_barracuda_json = verbose
args.print_layer_links = verbose
args.print_patterns = verbose
args.print_tensors = verbose
else:
args = verbose
# Load Tensorflow model
print("Converting %s to %s" % (source_file, target_file))
    with open(source_file, 'rb') as f:
        i_model = tf.GraphDef()
        i_model.ParseFromString(f.read())
if args.verbose:
print('OP_TYPES:', {layer.op for layer in i_model.node})
if args.print_source_json or args.verbose:
for layer in i_model.node:
if not layer.op == 'Const':
print('MODEL:', MessageToJson(layer) + ",")
# Convert
o_model = barracuda.Model()
o_model.layers, o_input_shapes, o_model.tensors, o_model.memories = \
process_model(i_model, args)
# Cleanup unconnected Identities (they might linger after processing complex node patterns like LSTM)
def cleanup_layers(layers):
all_layers = {l.name for l in layers}
all_inputs = {i for l in layers for i in l.inputs}
def is_unconnected_identity(layer):
if layer.class_name == 'Activation' and layer.activation == 0: # Identity
assert(len(layer.inputs) == 1)
if layer.inputs[0] not in all_layers and layer.name not in all_inputs:
                    return True
            return False
return [l for l in layers if not is_unconnected_identity(l)]
o_model.layers = cleanup_layers(o_model.layers)
all_inputs = {i for l in o_model.layers for i in l.inputs}
embedded_tensors = {t.name for l in o_model.layers for t in l.tensors}
# Find global tensors
def dims_to_barracuda_shape(dims):
shape = list(dims)
while len(shape) < 4:
shape = [1] + shape
return shape
o_model.globals = [t for t in o_model.tensors if t not in all_inputs and t not in embedded_tensors]
#for x in global_tensors:
# shape = dims_to_barracuda_shape(get_tensor_dims(o_model.tensors[x]))
# o_globals += [Struct(
# name = x,
# shape = shape,
# data = np.reshape(get_tensor_data(o_model.tensors[x]), shape).astype(np.float32))]
# Trim
if trim_unused_by_output:
o_model.layers = barracuda.trim(o_model.layers, trim_unused_by_output, args.verbose)
# Create load layers for constants
const_tensors = [i for i in all_inputs if i in o_model.tensors]
const_tensors += o_model.globals
for x in const_tensors:
shape = dims_to_barracuda_shape(get_tensor_dims(o_model.tensors[x]))
o_l = Struct(
type = 255, # Load
class_name = "Const",
name = x,
pads = [0,0,0,0],
strides = [],
pool_size = [],
axis = -1,
alpha = 1,
beta = 0,
activation = 0,
inputs = [],
tensors = [Struct(
name = x,
shape = shape,
data = np.reshape(get_tensor_data(o_model.tensors[x]), shape).astype(np.float32))]
)
o_model.layers.insert(0, o_l)
# Find model inputs & outputs
all_layers = {l.name for l in o_model.layers}
# global inputs => are inputs that are NOT connected to any layer in the network
# global outputs => are outputs that are NOT feeding any layer in the network OR are coming from Identity layers
o_model.inputs = {i:o_input_shapes[i] for l in o_model.layers for i in l.inputs if i not in all_layers and i not in o_model.memories}
def is_output_layer(layer):
if layer.class_name == 'Const': # Constants never count as global output even when unconnected
            return False
        if layer.name not in all_inputs:  # this layer does not feed into any other layer
return True
if layer.class_name == 'Activation' and layer.activation == 0: # Identity marks global output
return True
return False
o_model.outputs = [l.name for l in o_model.layers if is_output_layer(l)]
# Compress
if compress_f16:
o_model = barracuda.compress(o_model)
# Sort model so that layer inputs are always ready upfront
o_model.layers = barracuda.sort(o_model.layers, o_model.inputs, o_model.memories, args.verbose)
# Summary
barracuda.summary(o_model,
print_layer_links = args.print_layer_links or args.verbose,
print_barracuda_json = args.print_barracuda_json or args.verbose,
print_tensors = args.print_tensors or args.verbose)
# Write to file
barracuda.write(o_model, target_file)
print('DONE: wrote', target_file, 'file.')
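A typical invocation (paths are illustrative):
convert('models/frozen_graph_def.pb', 'models/mybrain.nn', verbose=True)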
|
Loads demonstration file and uses it to fill training buffer.
:param file_path: Location of demonstration file (.demo).
:param sequence_length: Length of trajectories to fill buffer.
:return:
def demo_to_buffer(file_path, sequence_length):
"""
Loads demonstration file and uses it to fill training buffer.
:param file_path: Location of demonstration file (.demo).
:param sequence_length: Length of trajectories to fill buffer.
:return:
"""
brain_params, brain_infos, _ = load_demonstration(file_path)
demo_buffer = make_demo_buffer(brain_infos, brain_params, sequence_length)
return brain_params, demo_buffer
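Usage sketch (the demo path and sequence length are illustrative):
brain_params, demo_buffer = demo_to_buffer('demos/expert.demo', sequence_length=64)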
|
Loads and parses a demonstration file.
:param file_path: Location of demonstration file (.demo).
:return: BrainParameter and list of BrainInfos containing demonstration data.
def load_demonstration(file_path):
"""
Loads and parses a demonstration file.
:param file_path: Location of demonstration file (.demo).
:return: BrainParameter and list of BrainInfos containing demonstration data.
"""
    # The first 32 bytes of the file are dedicated to meta-data, so payload parsing resumes at byte 33.
INITIAL_POS = 33
if not os.path.isfile(file_path):
raise FileNotFoundError("The demonstration file {} does not exist.".format(file_path))
file_extension = pathlib.Path(file_path).suffix
if file_extension != '.demo':
raise ValueError("The file is not a '.demo' file. Please provide a file with the "
"correct extension.")
brain_params = None
brain_infos = []
    with open(file_path, "rb") as fp:
        data = fp.read()
next_pos, pos, obs_decoded = 0, 0, 0
total_expected = 0
while pos < len(data):
next_pos, pos = _DecodeVarint32(data, pos)
if obs_decoded == 0:
meta_data_proto = DemonstrationMetaProto()
meta_data_proto.ParseFromString(data[pos:pos + next_pos])
total_expected = meta_data_proto.number_steps
pos = INITIAL_POS
if obs_decoded == 1:
brain_param_proto = BrainParametersProto()
brain_param_proto.ParseFromString(data[pos:pos + next_pos])
brain_params = BrainParameters.from_proto(brain_param_proto)
pos += next_pos
if obs_decoded > 1:
agent_info = AgentInfoProto()
agent_info.ParseFromString(data[pos:pos + next_pos])
brain_info = BrainInfo.from_agent_proto([agent_info], brain_params)
brain_infos.append(brain_info)
if len(brain_infos) == total_expected:
break
pos += next_pos
obs_decoded += 1
return brain_params, brain_infos, total_expected
|
Saves current model to checkpoint folder.
:param steps: Current number of steps in training process.
def _save_model(self, steps=0):
"""
Saves current model to checkpoint folder.
:param steps: Current number of steps in training process.
"""
for brain_name in self.trainers.keys():
self.trainers[brain_name].save_model()
self.logger.info('Saved Model')
|
Write all CSV metrics
:return:
def _write_training_metrics(self):
"""
Write all CSV metrics
:return:
"""
for brain_name in self.trainers.keys():
if brain_name in self.trainer_metrics:
self.trainers[brain_name].write_training_metrics()
|
Exports latest saved models to .nn format for Unity embedding.
def _export_graph(self):
"""
Exports latest saved models to .nn format for Unity embedding.
"""
for brain_name in self.trainers.keys():
self.trainers[brain_name].export_model()
|
Initialization of the trainers
:param trainer_config: The configurations of the trainers
def initialize_trainers(self, trainer_config: Dict[str, Dict[str, str]]):
"""
Initialization of the trainers
:param trainer_config: The configurations of the trainers
"""
trainer_parameters_dict = {}
for brain_name in self.external_brains:
trainer_parameters = trainer_config['default'].copy()
trainer_parameters['summary_path'] = '{basedir}/{name}'.format(
basedir=self.summaries_dir,
name=str(self.run_id) + '_' + brain_name)
trainer_parameters['model_path'] = '{basedir}/{name}'.format(
basedir=self.model_path,
name=brain_name)
trainer_parameters['keep_checkpoints'] = self.keep_checkpoints
if brain_name in trainer_config:
_brain_key = brain_name
while not isinstance(trainer_config[_brain_key], dict):
_brain_key = trainer_config[_brain_key]
for k in trainer_config[_brain_key]:
trainer_parameters[k] = trainer_config[_brain_key][k]
trainer_parameters_dict[brain_name] = trainer_parameters.copy()
for brain_name in self.external_brains:
if trainer_parameters_dict[brain_name]['trainer'] == 'offline_bc':
self.trainers[brain_name] = OfflineBCTrainer(
self.external_brains[brain_name],
trainer_parameters_dict[brain_name], self.train_model,
self.load_model, self.seed, self.run_id)
elif trainer_parameters_dict[brain_name]['trainer'] == 'online_bc':
self.trainers[brain_name] = OnlineBCTrainer(
self.external_brains[brain_name],
trainer_parameters_dict[brain_name], self.train_model,
self.load_model, self.seed, self.run_id)
elif trainer_parameters_dict[brain_name]['trainer'] == 'ppo':
self.trainers[brain_name] = PPOTrainer(
self.external_brains[brain_name],
self.meta_curriculum
.brains_to_curriculums[brain_name]
.min_lesson_length if self.meta_curriculum else 0,
trainer_parameters_dict[brain_name],
self.train_model, self.load_model, self.seed,
self.run_id)
self.trainer_metrics[brain_name] = self.trainers[brain_name].trainer_metrics
else:
raise UnityEnvironmentException('The trainer config contains '
'an unknown trainer type for '
'brain {}'
.format(brain_name))
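For illustration, a trainer_config of the shape this method expects; the keys and values below are typical hyperparameters, not a canonical config:
trainer_config = {
    'default': {
        'trainer': 'ppo',
        'batch_size': 1024,
        'buffer_size': 10240,
        'time_horizon': 64,
    },
    # Per-brain overrides; a plain string value aliases another section.
    'MyBrain': {
        'trainer': 'online_bc',
        'batch_size': 64,
    },
}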
|
Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
def _reset_env(self, env: BaseUnityEnvironment):
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
if self.meta_curriculum is not None:
return env.reset(train_mode=self.fast_simulation, config=self.meta_curriculum.get_config())
else:
return env.reset(train_mode=self.fast_simulation)
|
Sends a shutdown signal to the unity environment, and closes the socket connection.
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the socket connection.
"""
if self._socket is not None and self._conn is not None:
message_input = UnityMessage()
message_input.header.status = 400
self._communicator_send(message_input.SerializeToString())
if self._socket is not None:
self._socket.close()
self._socket = None
    if self._conn is not None:
self._conn.close()
self._conn = None
|
float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
# https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp
""" float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
"""
scale = gamma / np.sqrt(var + epsilon)
bias = beta - gamma * mean / np.sqrt(var + epsilon)
return [scale, bias]
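A quick NumPy check that the fused affine matches batch normalization (illustrative values):
import numpy as np
gamma, beta = np.array([1.5]), np.array([0.1])
mean, var, eps = np.array([0.3]), np.array([4.0]), 1e-5
scale, bias = fuse_batchnorm_weights(gamma, beta, mean, var, eps)
x = np.array([0.7])
assert np.allclose(scale * x + bias, gamma * (x - mean) / np.sqrt(var + eps) + beta)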
|
- Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
def rnn(name, input, state, kernel, bias, new_state, number_of_gates = 2):
''' - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
'''
nn = Build(name)
nn.tanh(
nn.mad(kernel=kernel, bias=bias,
x=nn.concat(input, state)),
        out=new_state)
    return nn.layers
|
- zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1
def gru(name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates = 2):
''' - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1
'''
nn = Build(name)
inputs = nn.concat(input, state)
u = nn.sigmoid(nn.mad(inputs, kernel_u, bias_u))
r = nn.sigmoid(nn.mad(inputs, kernel_r, bias_r))
r_state = nn.mul(r, state)
c = nn.tanh(nn.mad(kernel=kernel_c, bias=bias_c,
x=nn.concat(input, r_state)))
# new_h = u' * state + (1 - u') * c'
# = u' * state + c' - u' * c'
# u' * state + c'
nn.add(nn.mul(u, state), c)
# - u' * c'
nn.sub(nn._, nn.mul(u, c),
out=new_state)
    return nn.layers
|
Full:
- it = f(Xt*Wi + Ht_1*Ri + Pi . Ct_1 + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Pf . Ct_1 + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_1 + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Po . Ct + Wbo + Rbo)
- Ht = ot . h(Ct)
def lstm(name, input, state_c, state_h, kernel_i, kernel_j, kernel_f, kernel_o, bias_i, bias_j, bias_f, bias_o, new_state_c, new_state_h):
''' Full:
- it = f(Xt*Wi + Ht_1*Ri + Pi . Ct_1 + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Pf . Ct_1 + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_1 + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Po . Ct + Wbo + Rbo)
- Ht = ot . h(Ct)
'''
''' No peephole:
- it = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_ + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Wbo + Rbo)
- Ht = ot . h(Ct)
'''
nn = Build(name)
inputs = nn.concat(input, state_h)
i = nn.sigmoid(nn.mad(x=inputs, kernel=kernel_i, bias=bias_i))
j = nn.tanh(nn.mad(inputs, kernel_j, bias_j))
f = nn.sigmoid(nn.mad(inputs, kernel_f, bias_f))
o = nn.sigmoid(nn.mad(inputs, kernel_o, bias_o))
# new_c = state_c * f' + i' * j'
nn.add(
nn.mul(state_c, f), nn.mul(i, j),
out=new_state_c)
# new_h =
nn.mul(o, nn.tanh(new_state_c),
out=new_state_h)
return nn.layers
|
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo input to network.
:return: Results of evaluation.
def evaluate(self, brain_info):
"""
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo input to network.
:return: Results of evaluation.
"""
feed_dict = {self.model.dropout_rate: self.evaluate_rate,
self.model.sequence_length: 1}
feed_dict = self._fill_eval_dict(feed_dict, brain_info)
if self.use_recurrent:
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = brain_info.memories
run_out = self._execute_model(feed_dict, self.inference_dict)
return run_out
|
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
def update(self, mini_batch, num_sequences):
"""
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
feed_dict = {self.model.dropout_rate: self.update_rate,
self.model.batch_size: num_sequences,
self.model.sequence_length: self.sequence_length}
if self.use_continuous_act:
feed_dict[self.model.true_action] = mini_batch['actions']. \
reshape([-1, self.brain.vector_action_space_size[0]])
else:
feed_dict[self.model.true_action] = mini_batch['actions'].reshape(
[-1, len(self.brain.vector_action_space_size)])
feed_dict[self.model.action_masks] = np.ones(
(num_sequences, sum(self.brain.vector_action_space_size)))
if self.use_vec_obs:
apparent_obs_size = self.brain.vector_observation_space_size * \
self.brain.num_stacked_vector_observations
feed_dict[self.model.vector_in] = mini_batch['vector_obs'] \
                .reshape([-1, apparent_obs_size])
for i, _ in enumerate(self.model.visual_in):
visual_obs = mini_batch['visual_obs%d' % i]
feed_dict[self.model.visual_in[i]] = visual_obs
if self.use_recurrent:
feed_dict[self.model.memory_in] = np.zeros([num_sequences, self.m_size])
run_out = self._execute_model(feed_dict, self.update_dict)
return run_out
|
Increments the lesson number depending on the progress given.
:param measure_val: Measure of progress (either reward or percentage
steps completed).
:return: Whether the lesson was incremented.
def increment_lesson(self, measure_val):
"""
Increments the lesson number depending on the progress given.
:param measure_val: Measure of progress (either reward or percentage
steps completed).
    :return: Whether the lesson was incremented.
"""
    if not self.data or measure_val is None or math.isnan(measure_val):
return False
if self.data['signal_smoothing']:
measure_val = self.smoothing_value * 0.25 + 0.75 * measure_val
self.smoothing_value = measure_val
if self.lesson_num < self.max_lesson_num:
if measure_val > self.data['thresholds'][self.lesson_num]:
self.lesson_num += 1
config = {}
parameters = self.data['parameters']
for key in parameters:
config[key] = parameters[key][self.lesson_num]
logger.info('{0} lesson changed. Now in lesson {1}: {2}'
.format(self._brain_name,
self.lesson_num,
', '.join([str(x) + ' -> ' + str(config[x])
for x in config])))
return True
return False
|
Returns reset parameters which correspond to the lesson.
:param lesson: The lesson you want to get the config of. If None, the
current lesson is returned.
:return: The configuration of the reset parameters.
def get_config(self, lesson=None):
"""
Returns reset parameters which correspond to the lesson.
:param lesson: The lesson you want to get the config of. If None, the
current lesson is returned.
:return: The configuration of the reset parameters.
"""
if not self.data:
return {}
if lesson is None:
lesson = self.lesson_num
lesson = max(0, min(lesson, self.max_lesson_num))
config = {}
parameters = self.data['parameters']
for key in parameters:
config[key] = parameters[key][lesson]
return config
|
Computes generalized advantage estimate for use in updating policy.
:param rewards: list of rewards for time-steps t to T.
:param value_estimates: list of value estimates for time-steps t to T.
:param value_next: Value estimate for time-step T+1.
:param gamma: Discount factor.
:param lambd: GAE weighting factor.
:return: list of advantage estimates for time-steps t to T.
def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):
"""
Computes generalized advantage estimate for use in updating policy.
:param rewards: list of rewards for time-steps t to T.
    :param value_estimates: list of value estimates for time-steps t to T.
    :param value_next: Value estimate for time-step T+1.
    :param gamma: Discount factor.
    :param lambd: GAE weighting factor.
:return: list of advantage estimates for time-steps t to T.
"""
value_estimates = np.asarray(value_estimates.tolist() + [value_next])
delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]
advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)
return advantage
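A minimal worked example, assuming NumPy inputs; the discount_rewards helper below is a stand-in sketch for the one referenced above:
import numpy as np

def discount_rewards(r, gamma=0.99, value_next=0.0):
    # Discounted backward sum of the TD residuals passed in as r.
    discounted_r = np.zeros_like(r)
    running_add = value_next
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r

rewards = np.array([0.0, 0.0, 1.0])
values = np.array([0.5, 0.6, 0.7])
print(get_gae(rewards, values, value_next=0.0))  # per-step advantage estimates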
|
Increments the step count of the trainer and updates the last reward.
def increment_step_and_update_last_reward(self):
"""
    Increments the step count of the trainer and updates the last reward.
"""
if len(self.stats['Environment/Cumulative Reward']) > 0:
mean_reward = np.mean(self.stats['Environment/Cumulative Reward'])
self.policy.update_reward(mean_reward)
self.policy.increment_step()
self.step = self.policy.get_current_step()
|
Constructs a BrainInfo which contains the most recent previous experiences for all agents
which correspond to the agents in a provided next_info.
:BrainInfo next_info: A t+1 BrainInfo.
:return: curr_info: Reconstructed BrainInfo to match agents of next_info.
def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:
"""
    Constructs a BrainInfo which contains the most recent previous experiences for all agents
which correspond to the agents in a provided next_info.
:BrainInfo next_info: A t+1 BrainInfo.
:return: curr_info: Reconstructed BrainInfo to match agents of next_info.
"""
    visual_observations = [[] for _ in range(len(next_info.visual_observations))]  # one list per camera
vector_observations = []
text_observations = []
memories = []
rewards = []
local_dones = []
max_reacheds = []
agents = []
prev_vector_actions = []
prev_text_actions = []
action_masks = []
for agent_id in next_info.agents:
agent_brain_info = self.training_buffer[agent_id].last_brain_info
if agent_brain_info is None:
agent_brain_info = next_info
agent_index = agent_brain_info.agents.index(agent_id)
for i in range(len(next_info.visual_observations)):
visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])
vector_observations.append(agent_brain_info.vector_observations[agent_index])
text_observations.append(agent_brain_info.text_observations[agent_index])
if self.policy.use_recurrent:
if len(agent_brain_info.memories) > 0:
memories.append(agent_brain_info.memories[agent_index])
else:
memories.append(self.policy.make_empty_memory(1))
rewards.append(agent_brain_info.rewards[agent_index])
local_dones.append(agent_brain_info.local_done[agent_index])
max_reacheds.append(agent_brain_info.max_reached[agent_index])
agents.append(agent_brain_info.agents[agent_index])
prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])
prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])
action_masks.append(agent_brain_info.action_masks[agent_index])
if self.policy.use_recurrent:
memories = np.vstack(memories)
curr_info = BrainInfo(visual_observations, vector_observations, text_observations,
memories, rewards, agents, local_dones, prev_vector_actions,
prev_text_actions, max_reacheds, action_masks)
return curr_info
|
Adds experiences to each agent's experience history.
:param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.
:param next_all_info: Dictionary of all current brains and corresponding BrainInfo.
:param take_action_outputs: The outputs of the Policy's get_action method.
def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.
:param next_all_info: Dictionary of all current brains and corresponding BrainInfo.
:param take_action_outputs: The outputs of the Policy's get_action method.
"""
self.trainer_metrics.start_experience_collection_timer()
if take_action_outputs:
self.stats['Policy/Value Estimate'].append(take_action_outputs['value'].mean())
self.stats['Policy/Entropy'].append(take_action_outputs['entropy'].mean())
self.stats['Policy/Learning Rate'].append(take_action_outputs['learning_rate'])
curr_info = curr_all_info[self.brain_name]
next_info = next_all_info[self.brain_name]
for agent_id in curr_info.agents:
self.training_buffer[agent_id].last_brain_info = curr_info
self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs
if curr_info.agents != next_info.agents:
curr_to_use = self.construct_curr_info(next_info)
else:
curr_to_use = curr_info
intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)
for agent_id in next_info.agents:
stored_info = self.training_buffer[agent_id].last_brain_info
stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs
if stored_info is not None:
idx = stored_info.agents.index(agent_id)
next_idx = next_info.agents.index(agent_id)
if not stored_info.local_done[idx]:
for i, _ in enumerate(stored_info.visual_observations):
self.training_buffer[agent_id]['visual_obs%d' % i].append(
stored_info.visual_observations[i][idx])
self.training_buffer[agent_id]['next_visual_obs%d' % i].append(
next_info.visual_observations[i][next_idx])
if self.policy.use_vec_obs:
self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])
self.training_buffer[agent_id]['next_vector_in'].append(
next_info.vector_observations[next_idx])
if self.policy.use_recurrent:
if stored_info.memories.shape[1] == 0:
stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))
self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])
actions = stored_take_action_outputs['action']
if self.policy.use_continuous_act:
actions_pre = stored_take_action_outputs['pre_action']
self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])
epsilons = stored_take_action_outputs['random_normal_epsilon']
self.training_buffer[agent_id]['random_normal_epsilon'].append(
epsilons[idx])
else:
self.training_buffer[agent_id]['action_mask'].append(
stored_info.action_masks[idx], padding_value=1)
a_dist = stored_take_action_outputs['log_probs']
value = stored_take_action_outputs['value']
self.training_buffer[agent_id]['actions'].append(actions[idx])
self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])
self.training_buffer[agent_id]['masks'].append(1.0)
if self.use_curiosity:
self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +
intrinsic_rewards[next_idx])
else:
self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])
self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])
self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]
if self.use_curiosity:
if agent_id not in self.intrinsic_rewards:
self.intrinsic_rewards[agent_id] = 0
self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]
if not next_info.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1
self.trainer_metrics.end_experience_collection_timer()
|
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Dictionary of all current brains and corresponding BrainInfo.
:param new_info: Dictionary of all next brains and corresponding BrainInfo.
def process_experiences(self, current_info: AllBrainInfo, new_info: AllBrainInfo):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Dictionary of all current brains and corresponding BrainInfo.
:param new_info: Dictionary of all next brains and corresponding BrainInfo.
"""
self.trainer_metrics.start_experience_collection_timer()
info = new_info[self.brain_name]
for l in range(len(info.agents)):
agent_actions = self.training_buffer[info.agents[l]]['actions']
if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])
and len(agent_actions) > 0):
agent_id = info.agents[l]
if info.local_done[l] and not info.max_reached[l]:
value_next = 0.0
else:
if info.max_reached[l]:
bootstrapping_info = self.training_buffer[agent_id].last_brain_info
idx = bootstrapping_info.agents.index(agent_id)
else:
bootstrapping_info = info
idx = l
value_next = self.policy.get_value_estimate(bootstrapping_info, idx)
self.training_buffer[agent_id]['advantages'].set(
get_gae(
rewards=self.training_buffer[agent_id]['rewards'].get_batch(),
value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),
value_next=value_next,
gamma=self.trainer_parameters['gamma'],
lambd=self.trainer_parameters['lambd']))
self.training_buffer[agent_id]['discounted_returns'].set(
self.training_buffer[agent_id]['advantages'].get_batch()
+ self.training_buffer[agent_id]['value_estimates'].get_batch())
self.training_buffer.append_update_buffer(agent_id, batch_size=None,
training_length=self.policy.sequence_length)
self.training_buffer[agent_id].reset_agent()
if info.local_done[l]:
                self.cumulative_returns_since_policy_update.append(
                    self.cumulative_rewards.get(agent_id, 0))
self.stats['Environment/Cumulative Reward'].append(
self.cumulative_rewards.get(agent_id, 0))
self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))
self.stats['Environment/Episode Length'].append(
self.episode_steps.get(agent_id, 0))
self.cumulative_rewards[agent_id] = 0
self.episode_steps[agent_id] = 0
if self.use_curiosity:
self.stats['Policy/Curiosity Reward'].append(
self.intrinsic_rewards.get(agent_id, 0))
self.intrinsic_rewards[agent_id] = 0
self.trainer_metrics.end_experience_collection_timer()
|
A signal that the Episode has ended. The buffer must be reset.
Only gets called when the academy resets.
def end_episode(self):
"""
A signal that the Episode has ended. The buffer must be reset.
    Only gets called when the academy resets.
"""
self.training_buffer.reset_local_buffers()
for agent_id in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
for agent_id in self.episode_steps:
self.episode_steps[agent_id] = 0
if self.use_curiosity:
for agent_id in self.intrinsic_rewards:
self.intrinsic_rewards[agent_id] = 0
|
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
def is_ready_update(self):
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
size_of_buffer = len(self.training_buffer.update_buffer['actions'])
return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)
|
Uses training_buffer to update the policy.
def update_policy(self):
"""
    Uses training_buffer to update the policy.
"""
self.trainer_metrics.start_policy_update_timer(
number_experiences=len(self.training_buffer.update_buffer['actions']),
mean_return=float(np.mean(self.cumulative_returns_since_policy_update)))
n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
value_total, policy_total, forward_total, inverse_total = [], [], [], []
advantages = self.training_buffer.update_buffer['advantages'].get_batch()
self.training_buffer.update_buffer['advantages'].set(
(advantages - advantages.mean()) / (advantages.std() + 1e-10))
num_epoch = self.trainer_parameters['num_epoch']
for _ in range(num_epoch):
self.training_buffer.update_buffer.shuffle()
buffer = self.training_buffer.update_buffer
for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):
start = l * n_sequences
end = (l + 1) * n_sequences
run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)
value_total.append(run_out['value_loss'])
policy_total.append(np.abs(run_out['policy_loss']))
if self.use_curiosity:
inverse_total.append(run_out['inverse_loss'])
forward_total.append(run_out['forward_loss'])
self.stats['Losses/Value Loss'].append(np.mean(value_total))
self.stats['Losses/Policy Loss'].append(np.mean(policy_total))
if self.use_curiosity:
self.stats['Losses/Forward Loss'].append(np.mean(forward_total))
self.stats['Losses/Inverse Loss'].append(np.mean(inverse_total))
self.training_buffer.reset_update_buffer()
self.trainer_metrics.end_policy_update()
|
Resets the state of the environment and returns an initial observation.
In the case of multi-agent environments, this is a list.
Returns: observation (object/list): the initial observation of the
space.
def reset(self):
"""Resets the state of the environment and returns an initial observation.
In the case of multi-agent environments, this is a list.
Returns: observation (object/list): the initial observation of the
space.
"""
info = self._env.reset()[self.brain_name]
n_agents = len(info.agents)
self._check_agents(n_agents)
self.game_over = False
if not self._multiagent:
obs, reward, done, info = self._single_step(info)
else:
obs, reward, done, info = self._multi_step(info)
return obs
|
Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
In the case of multi-agent environments, these are lists.
Args:
action (object/list): an action provided by the environment
Returns:
observation (object/list): agent's observation of the current environment
reward (float/list) : amount of reward returned after previous action
done (boolean/list): whether the episode has ended.
info (dict): contains auxiliary diagnostic information, including BrainInfo.
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
In the case of multi-agent environments, these are lists.
Args:
action (object/list): an action provided by the environment
Returns:
observation (object/list): agent's observation of the current environment
reward (float/list) : amount of reward returned after previous action
done (boolean/list): whether the episode has ended.
info (dict): contains auxiliary diagnostic information, including BrainInfo.
"""
    # Validate the incoming action(s); translate flattened discrete actions when needed.
if self._multiagent:
if not isinstance(action, list):
raise UnityGymException("The environment was expecting `action` to be a list.")
if len(action) != self._n_agents:
raise UnityGymException(
"The environment was expecting a list of {} actions.".format(self._n_agents))
else:
if self._flattener is not None:
# Action space is discrete and flattened - we expect a list of scalars
action = [self._flattener.lookup_action(_act) for _act in action]
action = np.array(action)
else:
if self._flattener is not None:
# Translate action into list
action = self._flattener.lookup_action(action)
info = self._env.step(action)[self.brain_name]
n_agents = len(info.agents)
self._check_agents(n_agents)
self._current_state = info
if not self._multiagent:
obs, reward, done, info = self._single_step(info)
self.game_over = done
else:
obs, reward, done, info = self._multi_step(info)
self.game_over = all(done)
return obs, reward, done, info
|
Creates a Dict that maps discrete actions (scalars) to branched actions (lists).
Each key in the Dict maps to one unique set of branched actions, and each value
contains the List of branched actions.
def _create_lookup(self, branched_action_space):
"""
Creates a Dict that maps discrete actions (scalars) to branched actions (lists).
Each key in the Dict maps to one unique set of branched actions, and each value
contains the List of branched actions.
"""
possible_vals = [range(_num) for _num in branched_action_space]
all_actions = [list(_action) for _action in itertools.product(*possible_vals)]
# Dict should be faster than List for large action spaces
action_lookup = {_scalar: _action for (_scalar, _action) in enumerate(all_actions)}
return action_lookup
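For example, a branched action space of [2, 3] flattens to six scalar actions:
import itertools
print(dict(enumerate(list(a) for a in itertools.product(range(2), range(3)))))
# -> {0: [0, 0], 1: [0, 1], 2: [0, 2], 3: [1, 0], 4: [1, 1], 5: [1, 2]}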
|
Creates the GRPC server.
def create_server(self):
"""
Creates the GRPC server.
"""
self.check_port(self.port)
try:
# Establish communication grpc
self.server = grpc.server(ThreadPoolExecutor(max_workers=10))
self.unity_to_external = UnityToExternalServicerImplementation()
add_UnityToExternalServicer_to_server(self.unity_to_external, self.server)
# Using unspecified address, which means that grpc is communicating on all IPs
# This is so that the docker container can connect.
self.server.add_insecure_port('[::]:' + str(self.port))
self.server.start()
self.is_open = True
        except Exception:
raise UnityWorkerInUseException(self.worker_id)
|
Attempts to bind to the requested communicator port, checking if it is already in use.
def check_port(self, port):
"""
Attempts to bind to the requested communicator port, checking if it is already in use.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("localhost", port))
except socket.error:
raise UnityWorkerInUseException(self.worker_id)
finally:
s.close()
|
Sends a shutdown signal to the unity environment, and closes the grpc connection.
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the grpc connection.
"""
if self.is_open:
message_input = UnityMessage()
message_input.header.status = 400
self.unity_to_external.parent_conn.send(message_input)
self.unity_to_external.parent_conn.close()
self.server.stop(False)
self.is_open = False
|
Converts byte array observation image into numpy array, normalizes it to [0, 1],
and optionally converts it to grayscale
:param gray_scale: Whether to convert the image to grayscale.
:param image_bytes: input byte array corresponding to image
:return: processed numpy array of observation from environment
def process_pixels(image_bytes, gray_scale):
"""
    Converts byte array observation image into numpy array, normalizes it to [0, 1],
    and optionally converts it to grayscale
:param gray_scale: Whether to convert the image to grayscale.
:param image_bytes: input byte array corresponding to image
:return: processed numpy array of observation from environment
"""
s = bytearray(image_bytes)
image = Image.open(io.BytesIO(s))
s = np.array(image) / 255.0
if gray_scale:
s = np.mean(s, axis=2)
s = np.reshape(s, [s.shape[0], s.shape[1], 1])
return s
|
Converts list of agent infos to BrainInfo.
def from_agent_proto(agent_info_list, brain_params):
"""
Converts list of agent infos to BrainInfo.
"""
vis_obs = []
for i in range(brain_params.number_visual_observations):
obs = [BrainInfo.process_pixels(x.visual_observations[i],
brain_params.camera_resolutions[i]['blackAndWhite'])
for x in agent_info_list]
vis_obs += [obs]
if len(agent_info_list) == 0:
memory_size = 0
else:
memory_size = max([len(x.memories) for x in agent_info_list])
if memory_size == 0:
memory = np.zeros((0, 0))
else:
        for x in agent_info_list:
            x.memories.extend([0] * (memory_size - len(x.memories)))
memory = np.array([list(x.memories) for x in agent_info_list])
total_num_actions = sum(brain_params.vector_action_space_size)
mask_actions = np.ones((len(agent_info_list), total_num_actions))
for agent_index, agent_info in enumerate(agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == total_num_actions:
mask_actions[agent_index, :] = [
0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]
if any([np.isnan(x.reward) for x in agent_info_list]):
logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name)
if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]):
logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name)
if len(agent_info_list) == 0:
vector_obs = np.zeros(
(0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations)
)
else:
vector_obs = np.nan_to_num(
np.array([x.stacked_vector_observation for x in agent_info_list])
)
brain_info = BrainInfo(
visual_observation=vis_obs,
vector_observation=vector_obs,
text_observations=[x.text_observation for x in agent_info_list],
memory=memory,
reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list],
agents=[x.id for x in agent_info_list],
local_done=[x.done for x in agent_info_list],
vector_action=np.array([x.stored_vector_actions for x in agent_info_list]),
text_action=[list(x.stored_text_actions) for x in agent_info_list],
max_reached=[x.max_step_reached for x in agent_info_list],
custom_observations=[x.custom_observation for x in agent_info_list],
action_mask=mask_actions
)
return brain_info
|
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object.
def from_proto(brain_param_proto):
"""
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object.
"""
resolution = [{
"height": x.height,
"width": x.width,
"blackAndWhite": x.gray_scale
} for x in brain_param_proto.camera_resolutions]
brain_params = BrainParameters(brain_param_proto.brain_name,
brain_param_proto.vector_observation_size,
brain_param_proto.num_stacked_vector_observations,
resolution,
list(brain_param_proto.vector_action_size),
list(brain_param_proto.vector_action_descriptions),
brain_param_proto.vector_action_space_type)
return brain_params
|
Creates a new, blank dashboard and redirects to it in edit mode
def new(self):
"""Creates a new, blank dashboard and redirects to it in edit mode"""
new_dashboard = models.Dashboard(
dashboard_title='[ untitled dashboard ]',
owners=[g.user],
)
db.session.add(new_dashboard)
db.session.commit()
return redirect(f'/superset/dashboard/{new_dashboard.id}/?edit=true')
|
List all tags a given object has.
def get(self, object_type, object_id):
"""List all tags a given object has."""
if object_id == 0:
return json_success(json.dumps([]))
query = db.session.query(TaggedObject).filter(and_(
TaggedObject.object_type == object_type,
TaggedObject.object_id == object_id))
tags = [{'id': obj.tag.id, 'name': obj.tag.name} for obj in query]
return json_success(json.dumps(tags))
|
Add new tags to an object.
def post(self, object_type, object_id):
"""Add new tags to an object."""
if object_id == 0:
return Response(status=404)
tagged_objects = []
for name in request.get_json(force=True):
if ':' in name:
type_name = name.split(':', 1)[0]
type_ = TagTypes[type_name]
else:
type_ = TagTypes.custom
tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
if not tag:
tag = Tag(name=name, type=type_)
tagged_objects.append(
TaggedObject(
object_id=object_id,
object_type=object_type,
tag=tag,
),
)
db.session.add_all(tagged_objects)
db.session.commit()
return Response(status=201)
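For illustration, the request body is a JSON list of tag names; a 'type:' prefix selects the tag type, anything else becomes TagTypes.custom. The URL below is hypothetical:
import requests
requests.post('http://localhost:8088/tagview/tags/dashboard/42/',  # hypothetical endpoint
              json=['finance', 'owner:alice'])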
|
Remove tags from an object.
def delete(self, object_type, object_id):
"""Remove tags from an object."""
tag_names = request.get_json(force=True)
if not tag_names:
return Response(status=403)
db.session.query(TaggedObject).filter(and_(
TaggedObject.object_type == object_type,
TaggedObject.object_id == object_id),
TaggedObject.tag.has(Tag.name.in_(tag_names)),
).delete(synchronize_session=False)
db.session.commit()
return Response(status=204)
|
Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they exist.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copied over.
def import_datasource(
session,
i_datasource,
lookup_database,
lookup_datasource,
import_time):
"""Imports the datasource from the object to the database.
    Metrics, columns and the datasource will be overridden if they exist.
    This function can be used to import/export dashboards between multiple
    superset instances. Audit metadata isn't copied over.
"""
make_transient(i_datasource)
logging.info('Started import of the datasource: {}'.format(
i_datasource.to_json()))
i_datasource.id = None
i_datasource.database_id = lookup_database(i_datasource).id
i_datasource.alter_params(import_time=import_time)
# override the datasource
datasource = lookup_datasource(i_datasource)
if datasource:
datasource.override(i_datasource)
session.flush()
else:
datasource = i_datasource.copy()
session.add(datasource)
session.flush()
for m in i_datasource.metrics:
new_m = m.copy()
new_m.table_id = datasource.id
logging.info('Importing metric {} from the datasource: {}'.format(
new_m.to_json(), i_datasource.full_name))
imported_m = i_datasource.metric_class.import_obj(new_m)
if (imported_m.metric_name not in
[m.metric_name for m in datasource.metrics]):
datasource.metrics.append(imported_m)
for c in i_datasource.columns:
new_c = c.copy()
new_c.table_id = datasource.id
logging.info('Importing column {} from the datasource: {}'.format(
new_c.to_json(), i_datasource.full_name))
imported_c = i_datasource.column_class.import_obj(new_c)
if (imported_c.column_name not in
[c.column_name for c in datasource.columns]):
datasource.columns.append(imported_c)
session.flush()
return datasource.id
|
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: https://alembic.sqlalchemy.org/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
engine = engine_from_config(config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
kwargs = {}
if engine.name in ('sqlite', 'mysql'):
kwargs = {
'transaction_per_migration': True,
'transactional_ddl': True,
}
configure_args = current_app.extensions['migrate'].configure_args
if configure_args:
kwargs.update(configure_args)
context.configure(connection=connection,
target_metadata=target_metadata,
# compare_type=True,
process_revision_directives=process_revision_directives,
**kwargs)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
|
Returns a pandas dataframe based on the query object
def get_df(self, query_obj=None):
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return None
self.error_msg = ''
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_obj['granularity'])
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if df is not None and not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ('epoch_s', 'epoch_ms'):
                # Column holds raw epoch values (as ints or strings); convert below.
dttm_col = df[DTTM_ALIAS]
one_ts_val = dttm_col[0]
# convert time column to pandas Timestamp, but different
# ways to convert depending on string or int types
try:
int(one_ts_val)
is_integral = True
except ValueError:
is_integral = False
if is_integral:
unit = 's' if timestamp_format == 'epoch_s' else 'ms'
df[DTTM_ALIAS] = pd.to_datetime(dttm_col, utc=False, unit=unit,
origin='unix')
else:
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += self.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
|
Building a query object
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = form_data.get('groupby') or []
metrics = self.all_metrics or []
columns = form_data.get('columns') or []
groupby = []
for o in gb + columns:
if o not in groupby:
groupby.append(o)
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = (
form_data.get('granularity') or
form_data.get('granularity_sqla')
)
limit = int(form_data.get('limit') or 0)
timeseries_limit_metric = form_data.get('timeseries_limit_metric')
row_limit = int(form_data.get('row_limit') or config.get('ROW_LIMIT'))
# default order direction
order_desc = form_data.get('order_desc', True)
        # NOTE: `relative_end` is undefined in this snippet; a 'today' fallback is assumed here.
        relative_end = form_data.get('relative_end') or 'today'
        since, until = utils.get_since_until(relative_end=relative_end,
                                             time_range=form_data.get('time_range'),
                                             since=form_data.get('since'),
                                             until=form_data.get('until'))
time_shift = form_data.get('time_shift', '')
self.time_shift = utils.parse_human_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise Exception(_('From date cannot be larger than to date'))
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
'where': form_data.get('where', ''),
'having': form_data.get('having', ''),
'having_druid': form_data.get('having_filters', []),
'time_grain_sqla': form_data.get('time_grain_sqla', ''),
'druid_time_origin': form_data.get('druid_time_origin', ''),
}
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': self.form_data.get('filters', []),
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'order_desc': order_desc,
'prequeries': [],
'is_prequery': False,
}
return d
|
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
different time shifts will differ only in the `from_dttm` and `to_dttm`
values which are stripped.
def cache_key(self, query_obj, **extra):
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
    the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
    different time shifts will differ only in the `from_dttm` and `to_dttm`
values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ['from_dttm', 'to_dttm']:
del cache_dict[k]
cache_dict['time_range'] = self.form_data.get('time_range')
cache_dict['datasource'] = self.datasource.uid
json_data = self.json_dumps(cache_dict, sort_keys=True)
return hashlib.md5(json_data.encode('utf-8')).hexdigest()
|
This is the data object serialized to the js layer
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content
|
Returns the query object for this visualization
def query_obj(self):
"""Returns the query object for this visualization"""
d = super().query_obj()
d['row_limit'] = self.form_data.get(
'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_columns = self.form_data.get('all_columns_x')
if numeric_columns is None:
raise Exception(_('Must have at least one numeric column specified'))
self.columns = numeric_columns
d['columns'] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d['groupby'] = []
return d
|
Returns the chart data
def get_data(self, df):
"""Returns the chart data"""
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend([{
'key': self.labelify(keys, column),
'values': data[column].tolist()}
for column in self.columns])
return chart_data
|