| Column | Dtype | Range / distinct values |
|---|---|---|
| task_type | stringclasses | 4 values |
| code_task | stringclasses | 15 values |
| start_line | int64 | 4 to 1.79k |
| end_line | int64 | 4 to 1.8k |
| before | stringlengths | 79 to 76.1k |
| between | stringlengths | 17 to 806 |
| after | stringlengths | 2 to 72.6k |
| reason_categories_output | stringlengths | 2 to 2.24k |
| horizon_categories_output | stringlengths | 83 to 3.99k |
| reason_freq_analysis | stringclasses | 150 values |
| horizon_freq_analysis | stringlengths | 23 to 185 |
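The columns above can be inspected programmatically; the snippet below is a minimal sketch using the Hugging Face `datasets` library. The repository id `user/rl-motion-planning-completions` and the split name are placeholders (assumptions for illustration), not the actual dataset path.

```python
# Minimal sketch: loading and inspecting one row of this dataset with the
# Hugging Face `datasets` library. The repository id and split name below are
# placeholders, not the real dataset location.
from datasets import load_dataset

ds = load_dataset("user/rl-motion-planning-completions", split="train")  # hypothetical repo id

row = ds[0]
print(row["task_type"], row["code_task"])     # e.g. completion_python, RL_Motion_Planning
print(row["start_line"], row["end_line"])     # line span covered by the 'between' field
print(len(row["before"]), len(row["after"]))  # serialized context lengths in characters
```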
Row 1
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 260
end_line: 260
before:
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:']
between:
[' ep_range = tf.range(ep_start, ep_end)']
after:
[' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' 
self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
reason_categories_output: [{'reason_category': 'Else Reasoning', 'usage_line': 260}]
horizon_categories_output: Library 'tf' used at line 260 is imported at line 20 and has a Long-Range dependency. Variable 'ep_start' used at line 260 is defined at line 251 and has a Short-Range dependency. Variable 'ep_end' used at line 260 is defined at line 251 and has a Short-Range dependency.
reason_freq_analysis: {'Else Reasoning': 1}
horizon_freq_analysis: {'Library Long-Range': 1, 'Variable Short-Range': 2}
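For a row like the one above, the `between` field holds the ground-truth lines for the span `start_line` to `end_line`, while `before` and `after` provide the surrounding file context. The sketch below assumes the three fields are serialized as Python list literals of source lines, as they appear in this preview; that serialization, and the helper name, are assumptions rather than a documented API of the dataset.

```python
# Sketch of stitching one row back into a full source file, assuming the
# 'before', 'between' and 'after' cells are Python list literals of source
# lines (as rendered in this preview). This is an assumption about the
# serialization, not a documented guarantee of the dataset.
import ast

def reconstruct_file(row: dict) -> str:
    before = ast.literal_eval(row["before"])    # lines preceding the target span
    between = ast.literal_eval(row["between"])  # ground-truth lines start_line..end_line
    after = ast.literal_eval(row["after"])      # lines following the target span
    return "\n".join(before + between + after)

# In the row above, 'between' is the single line
#     ep_range = tf.range(ep_start, ep_end)
# at line 260; a completion model is asked to produce it given 'before' as context,
# and reason_categories_output / horizon_categories_output annotate the reasoning
# and dependency types involved at that line.
```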
Row 2
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 265
end_line: 265
before:
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):']
between:
[' buffered_data[key] = _data[index]']
after:
[' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, 
self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 265}]
Variable 'buffered_data' used at line 265 is defined at line 262 and has a Short-Range dependency. Variable 'key' used at line 265 is part of a Loop defined at line 264 and has a Short-Range dependency. Variable '_data' used at line 265 is defined at line 263 and has a Short-Range dependency. Variable 'index' used at line 265 is part of a Loop defined at line 264 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2, 'Variable Loop Short-Range': 2}
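The dependency summary above describes the assignment inside ReplayBufferTf.sample_episodes' read loop: the usage at source line 265 depends on buffered_data and _data defined at lines 262-263 (Variable Short-Range) and on index and key introduced by the loop at line 264 (Variable Loop Short-Range). Below is a minimal, self-contained Python sketch of that read path with comments marking where each of those variables is defined and used. FakeTable and the plain-list column layout are assumptions introduced so the snippet runs without TensorFlow or tf_agents; the actual method in the record is a @tf.function reading from a tf_agents Table.

class FakeTable:
    """Stand-in (assumption) for tf_agents' Table: one stored column per buffer key."""
    def __init__(self, columns):
        self._columns = columns  # list of per-key row lists, ordered like buffer_keys

    def read(self, rows):
        # Return one list per buffer key, in the same order as buffer_keys.
        return [[col[r] for r in rows] for col in self._columns]


def sample_episodes(buffer_keys, table, current_size):
    ep_range = range(current_size)              # episodes to read
    buffered_data = {}                          # line 262: buffered_data defined (Short-Range source)
    _data = table.read(rows=ep_range)           # line 263: _data defined (Short-Range source)
    for index, key in enumerate(buffer_keys):   # line 264: loop defines index and key (Loop Short-Range)
        # line 265: uses buffered_data and _data (Variable Short-Range),
        #           index and key (Variable Loop Short-Range)
        buffered_data[key] = _data[index]
    print("buffered_data: ", buffered_data)     # line 266
    return buffered_data                        # line 267


if __name__ == "__main__":
    keys = ["states", "actions"]
    table = FakeTable(columns=[[[0.0], [1.0]], [[0.1], [0.9]]])
    episodes = sample_episodes(keys, table, current_size=2)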
completion_python
RL_Motion_Planning
264
267
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)']
[' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data']
[' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using 
Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Define Stop Criteria', 'usage_line': 264}, {'reason_category': 'Loop Body', 'usage_line': 265}]
Variable 'self' used at line 264 is defined at line 251 and has a Medium-Range dependency. Variable 'buffered_data' used at line 265 is defined at line 262 and has a Short-Range dependency. Variable 'key' used at line 265 is part of a Loop defined at line 264 and has a Short-Range dependency. Variable '_data' used at line 265 is defined at line 263 and has a Short-Range dependency. Variable 'index' used at line 265 is part of a Loop defined at line 264 and has a Short-Range dependency. Variable 'buffered_data' used at line 266 is defined at line 262 and has a Short-Range dependency. Variable 'buffered_data' used at line 267 is defined at line 262 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 1}
{'Variable Medium-Range': 1, 'Variable Short-Range': 4, 'Variable Loop Short-Range': 2}
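The two dictionaries just above look like plain frequency counts over the annotations earlier in this row: the first tallies the reason categories, the second the dependency kinds. A minimal sketch of the first tally, assuming the annotations are available as the Python literal shown above (the variable names here are illustrative and not part of the row):

```python
from collections import Counter

# Reason annotations copied verbatim from this row (see the list a few lines above).
annotations = [
    {'reason_category': 'Define Stop Criteria', 'usage_line': 264},
    {'reason_category': 'Loop Body', 'usage_line': 265},
]

# Tally how often each reason category occurs.
freq = Counter(entry['reason_category'] for entry in annotations)

print(dict(freq))  # {'Define Stop Criteria': 1, 'Loop Body': 1}
```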
completion_python
RL_Motion_Planning
284
284
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:']
[' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)']
[' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' 
"""Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 284}]
Variable 'episode_batch' used at line 284 is defined at line 282 and has a Short-Range dependency. Variable 'key' used at line 284 is part of a Loop defined at line 283 and has a Short-Range dependency. Library 'tf' used at line 284 is imported at line 20 and has a Long-Range dependency. Variable 'episodes_batch' used at line 284 is defined at line 280 and has a Short-Range dependency. Variable 'ep_idx' used at line 284 is part of a Loop defined at line 281 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2, 'Variable Loop Short-Range': 2, 'Library Long-Range': 1}
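As with the previous row, the dependency summary just above appears consistent with bucketing each sentence of the dependency description by its leading kind (Variable or Library), whether it mentions a loop, and its range label. A rough sketch under that assumption — the parsing helper is hypothetical and not part of the dataset:

```python
import re
from collections import Counter

# Dependency description copied verbatim from this row (see a few lines above).
text = (
    "Variable 'episode_batch' used at line 284 is defined at line 282 and has a Short-Range dependency. "
    "Variable 'key' used at line 284 is part of a Loop defined at line 283 and has a Short-Range dependency. "
    "Library 'tf' used at line 284 is imported at line 20 and has a Long-Range dependency. "
    "Variable 'episodes_batch' used at line 284 is defined at line 280 and has a Short-Range dependency. "
    "Variable 'ep_idx' used at line 284 is part of a Loop defined at line 281 and has a Short-Range dependency."
)

def dep_key(sentence: str) -> str:
    # Build a key such as 'Variable Loop Short-Range' from one sentence.
    kind = sentence.split()[0]                                   # 'Variable' or 'Library'
    loop = ' Loop' if 'part of a Loop' in sentence else ''        # loop-carried definition?
    rng = re.search(r'(Short|Medium|Long)-Range', sentence).group(0)
    return f'{kind}{loop} {rng}'

freq = Counter(dep_key(s) for s in text.split('. ') if s.strip())

print(dict(freq))
# {'Variable Short-Range': 2, 'Variable Loop Short-Range': 2, 'Library Long-Range': 1}
```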
completion_python
RL_Motion_Planning
289
289
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:']
[' num_to_ins = tf.cast(1, dtype=tf.int32)']
[' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' 
Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 289}]
Library 'tf' used at line 289 is imported at line 20 and has a Long-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1}
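The rows above and below draw on the same underlying Python source file; only the masked span and its analysis fields differ. As a reading aid, here is a minimal, hedged sketch of how one such row could be reassembled into a fill-in-the-middle prompt and its target. Only the field names (before, between, after) come from the schema listed at the top of this dump; the joining convention (newline-joined lists, a placeholder token for the masked span) and the trimmed-down example_row values are illustrative assumptions, not something the dump itself specifies.

# Minimal sketch (assumptions noted above): rebuild (prompt, target) from one row.
def row_to_prompt_and_target(row):
    # 'before', 'between' and 'after' are lists of source lines, as in the rows shown here.
    prompt = "\n".join(row["before"]) + "\n<MASK>\n" + "\n".join(row["after"])
    target = "\n".join(row["between"])  # the line(s) the model is asked to produce
    return prompt, target

# Hypothetical, heavily trimmed row used only to exercise the helper above;
# the 'between' line is copied from the first record in this dump.
example_row = {
    "task_type": "completion_python",
    "code_task": "RL_Motion_Planning",
    "before": ["    def _get_storage_idxs(self, num_to_ins=None):",
               "        if num_to_ins is None:"],
    "between": ["            num_to_ins = tf.cast(1, dtype=tf.int32)"],
    "after": ["", "        # consecutively insert until you hit the end of the buffer, ..."],
}

prompt, target = row_to_prompt_and_target(example_row)
print(target)

The same helper applies unchanged to the full-length rows shown in this dump, since their before/between/after fields follow the same list-of-lines layout.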
completion_python
RL_Motion_Planning
293
293
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:']
[' idxs = tf.range(self.current_size, self.current_size + num_to_ins)']
[' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = 
tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], 
axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " 
trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + 
"/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', 
choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. 
ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 293}]
Library 'tf' used at line 293 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 293 is defined at line 287 and has a Short-Range dependency. Variable 'num_to_ins' used at line 293 is defined at line 287 and has a Short-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
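For context, the If Body this record scores (usage at source line 293, depending on names defined at line 287) is the consecutive-insertion branch of ReplayBufferTf._get_storage_idxs, which also appears in the next record's before context. A minimal standalone sketch of just that branch, using made-up buffer sizes purely for illustration (buffer_size, current_size and num_to_ins below are hypothetical, not taken from the record):

import tensorflow as tf

# Hypothetical buffer state chosen so the "enough room" branch is taken.
buffer_size = tf.constant(10, dtype=tf.int32)
current_size = tf.Variable(3, dtype=tf.int32)
num_to_ins = tf.constant(2, dtype=tf.int32)

if current_size + num_to_ins <= buffer_size:
    # Spare capacity available: allocate storage rows consecutively (the If Body at source line 293).
    idxs = tf.range(current_size, current_size + num_to_ins)
    print(idxs)  # tf.Tensor([3 4], shape=(2,), dtype=int32)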
completion_python
RL_Motion_Planning
295
298
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:']
[' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)']
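The four ground-truth lines above form the partial-overflow branch: the remaining tail of the buffer is filled in order, and the overflow overwrites randomly chosen existing rows. A standalone sketch of just this branch, with hypothetical sizes picked so the insert overflows the remaining capacity (the random call mirrors the record's own tf.experimental.numpy.random.randint usage):

import tensorflow as tf

# Hypothetical state: 8 of 10 rows used, 5 new rows to insert.
buffer_size = tf.constant(10, dtype=tf.int32)
current_size = tf.Variable(8, dtype=tf.int32)
num_to_ins = tf.constant(5, dtype=tf.int32)

overflow = num_to_ins - (buffer_size - current_size)   # 5 - (10 - 8) = 3 rows too many
idx_a = tf.range(current_size, buffer_size)            # fill the free tail rows [8, 9]
idx_b = tf.experimental.numpy.random.randint(0, current_size, size=(overflow,), dtype=tf.int32)
idxs = tf.concat([idx_a, idx_b], axis=0)               # 5 storage rows in total
print(idxs)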
[' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' 
return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = 
tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], 
dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = 
self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " 
parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Elif Body', 'usage_line': 295}, {'reason_category': 'Elif Body', 'usage_line': 296}, {'reason_category': 'Elif Body', 'usage_line': 297}, {'reason_category': 'Elif Body', 'usage_line': 298}]
Variable 'num_to_ins' used at line 295 is defined at line 287 and has a Short-Range dependency. Variable 'self' used at line 295 is defined at line 287 and has a Short-Range dependency. Library 'tf' used at line 296 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 296 is defined at line 287 and has a Short-Range dependency. Library 'tf' used at line 297 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 297 is defined at line 287 and has a Short-Range dependency. Variable 'overflow' used at line 297 is defined at line 295 and has a Short-Range dependency. Library 'tf' used at line 298 is imported at line 20 and has a Long-Range dependency. Variable 'idx_a' used at line 298 is defined at line 296 and has a Short-Range dependency. Variable 'idx_b' used at line 298 is defined at line 297 and has a Short-Range dependency.
{'Elif Body': 4}
{'Variable Short-Range': 7, 'Library Long-Range': 3}
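Taken together, the branches referenced by these records (consecutive fill at line 293, partial overflow at lines 295-298, uniform random overwrite at line 300) implement a wrap-around storage policy, and the line numbers cited in the dependency analysis above map onto the statements below. A self-contained sketch under assumed toy sizes, so every branch is exercised; the function name and the buffer dimensions are illustrative only:

import tensorflow as tf

buffer_size = tf.constant(5, dtype=tf.int32)
current_size = tf.Variable(0, dtype=tf.int32)

def get_storage_idxs(num_to_ins):
    # Mirrors the three branches of ReplayBufferTf._get_storage_idxs.
    if current_size + num_to_ins <= buffer_size:
        # Room left: insert consecutively.
        idxs = tf.range(current_size, current_size + num_to_ins)
    elif current_size < buffer_size:
        # Partially full: fill the tail, then overwrite random existing rows with the overflow.
        overflow = num_to_ins - (buffer_size - current_size)
        idx_a = tf.range(current_size, buffer_size)
        idx_b = tf.experimental.numpy.random.randint(0, current_size, size=(overflow,), dtype=tf.int32)
        idxs = tf.concat([idx_a, idx_b], axis=0)
    else:
        # Already full: overwrite rows chosen uniformly at random.
        idxs = tf.experimental.numpy.random.randint(0, buffer_size, size=(num_to_ins,), dtype=tf.int32)
    current_size.assign(tf.math.minimum(buffer_size, current_size + num_to_ins))
    return idxs

for n in (3, 3, 4):
    print(get_storage_idxs(tf.constant(n, dtype=tf.int32)))
# 1st call: [0 1 2]; 2nd: rows 3, 4 plus one random overwrite; 3rd: four random overwrites.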
completion_python
RL_Motion_Planning
300
300
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:']
[' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)']
[' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', 
' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or 
has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Else Reasoning', 'usage_line': 300}]
Library 'tf' used at line 300 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 300 is defined at line 287 and has a Medium-Range dependency. Variable 'num_to_ins' used at line 300 is defined at line 287 and has a Medium-Range dependency.
{'Else Reasoning': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 2}
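The record above (start_line = end_line = 300) targets the final else branch of the same method: once the buffer is full, all requested rows are overwritten at uniformly random positions, and the episode count is then capped at capacity. A hedged sketch reconstructed from the record's 'between' and 'after' fields, with illustrative sizes that are not part of the record:

import tensorflow as tf

buffer_size = tf.constant(100, dtype=tf.int32)   # illustrative capacity
current_size = tf.Variable(100, dtype=tf.int32)  # buffer already full
num_to_ins = 4                                   # illustrative batch of episodes to insert

# Overwrite num_to_ins randomly chosen rows anywhere in [0, buffer_size).
idxs = tf.experimental.numpy.random.randint(0, buffer_size, size=(num_to_ins,), dtype=tf.int32)
# Episode count never exceeds capacity (mirrors the update shown in the 'after' field).
current_size.assign(tf.math.minimum(buffer_size, current_size + num_to_ins))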
completion_python
RL_Motion_Planning
308
308
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):']
[' return self.current_size']
[' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu 
= tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. 
Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 308 is defined at line 307 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
completion_python
RL_Motion_Planning
311
311
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):']
[' return self.current_size * self.T']
[' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' 
# Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 311 is defined at line 310 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
completion_python
RL_Motion_Planning
314
314
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):']
[' self.current_size.assign(0)']
[' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = 
tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 314 is defined at line 313 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
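A minimal sketch of how the 'Variable Short-Range' label in the record above can be read: the gold completion at line 314 ('self.current_size.assign(0)') uses only 'self', which is bound by 'def clear_buffer(self):' on line 313, so the definition-to-use distance is 1. The snippet below is illustrative only; the variable names, the 10-line cut-off, and the alternative 'Variable Long-Range' label are assumptions, not values taken from the dataset.

# Illustrative only: reproduces the Short-Range label for the record above.
# def_line / use_line come from the record (lines 313 and 314); the 10-line
# cut-off and the 'Variable Long-Range' alternative are assumed placeholders.
def_line = 313   # 'def clear_buffer(self):' binds 'self'
use_line = 314   # 'self.current_size.assign(0)' uses 'self' (the gold completion)
distance = use_line - def_line
label = 'Variable Short-Range' if distance <= 10 else 'Variable Long-Range'
print(distance, label)   # -> 1 Variable Short-Range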
completion_python
RL_Motion_Planning
318
318
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):']
[' return self.current_size == self.buffer_size']
[' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', 
' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
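The after field above serializes the full behaviour-cloning script, including an Actor whose get_log_prob sums a fixed-std Gaussian log-density over action dimensions. Below is a minimal NumPy sketch of that per-dimension formula as stored in the script; the function and variable names here are illustrative and are not part of the serialized code.

import numpy as np

def gaussian_log_prob(actions, mu, std=0.05):
    # Per-dimension log N(a | mu, std^2), summed over the action dimension,
    # mirroring the expression used in the stored Actor.get_log_prob.
    log_probs = -0.5 * np.square((actions - mu) / std) - 0.5 * np.log(2 * np.pi) - np.log(std)
    return log_probs.sum(axis=-1)

mu = np.zeros((1, 4))            # a_dim = 4 in the stored config
actions = np.full((1, 4), 0.05)  # one std away from the mean in every dimension
print(gaussian_log_prob(actions, mu))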
[]
Variable 'self' used at line 318 is defined at line 317 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
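The two annotation fields above pair a free-text dependency description (horizon_categories_output) with a category-frequency dict (horizon_freq_analysis). The sketch below shows one hypothetical way such a count could be reproduced from the text; the regular expression and the helper name are assumptions for illustration, not the dataset's actual tooling.

import re
from collections import Counter

annotation = ("Variable 'self' used at line 318 is defined at line 317 "
              "and has a Short-Range dependency.")

def count_dependency_categories(text):
    # Pair each entity type ("Variable", "Function", ...) with its range label.
    pairs = re.findall(r"(\w+) '[^']*' used at line \d+ .*? has a (\w+-Range) dependency", text)
    return Counter("{} {}".format(kind, rng) for kind, rng in pairs)

print(dict(count_dependency_categories(annotation)))   # expected: {'Variable Short-Range': 1}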
completion_python
RL_Motion_Planning
321
321
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):']
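The before field of this record ends inside ReplayBufferTf and also contains the unsegmented HER-style sampler (sample_random_transitions), which draws a current time step in [0, terminate_idx] and a future relabelling step in [t, terminate_idx] for each sampled episode. A rough NumPy sketch of that indexing scheme follows, with illustrative names and without the TensorFlow casting details of the stored code.

import numpy as np

rng = np.random.default_rng(0)
terminate_idxes = np.array([99, 42, 99])                 # per-episode terminate index
t_frac = rng.random(terminate_idxes.shape)
t_samples = np.round(t_frac * terminate_idxes).astype(int)

future_frac = rng.random(terminate_idxes.shape)
future_offset = (future_frac * (terminate_idxes - t_samples)).astype(int)
t_future = t_samples + future_offset                      # HER goal is relabelled from this step

print(t_samples, t_future)                                # t_future >= t_samples element-wise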
[' return self.current_size']
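This record's between field holds the single ground-truth line (return self.current_size) that completes __len__, with start_line and end_line both 321. A hypothetical reassembly of such a record is sketched below; the splicing rule and the heavily truncated field contents are assumptions for illustration, not documented dataset semantics.

def reassemble(record):
    # Splice the stored line lists back into one source string.
    lines = record["before"] + record["between"] + record["after"]
    return "\n".join(lines)

# Stand-in for the record shown above; the real fields hold the full script.
record = {
    "start_line": 321,
    "end_line": 321,
    "before": ["class ReplayBufferTf:", "    # ... preceding methods elided ...", "    def __len__(self):"],
    "between": ["        return self.current_size"],
    "after": ["", "    def save_buffer_data(self, path):", "        # ... rest of the script elided ..."],
}

source = reassemble(record)
print(source)
# In the full record, the between line would sit at source line start_line (= end_line = 321).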
[' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = 
self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 321 is defined at line 320 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
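A minimal, runnable sketch of the 'Variable Short-Range' category counted in the frequency map above: a variable defined on one line and consumed within the next few lines. The snippet is illustrative only; it is not the span (lines 320-321) that this record refers to.

import numpy as np

rng = np.random.default_rng(1234)  # definition of 'rng'
sample = rng.random(size=(4,))     # use of 'rng' one line later: a Variable Short-Range dependency
print(sample)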
completion_python
RL_Motion_Planning
327
327
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):']
[' buffered_data[key] = _data[index]']
[' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' 
print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 327}]
Variable 'buffered_data' used at line 327 is defined at line 324 and has a Short-Range dependency. Variable 'key' used at line 327 is part of a Loop defined at line 326 and has a Short-Range dependency. Variable '_data' used at line 327 is defined at line 325 and has a Short-Range dependency. Variable 'index' used at line 327 is part of a Loop defined at line 326 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2, 'Variable Loop Short-Range': 2}
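The annotations above classify the completion at line 327 ('buffered_data[key] = _data[index]') against its context. Below is a minimal, self-contained analogue of that span (line numbers taken from the record; the real code lives in ReplayBufferTf.save_buffer_data and reads from self.table), with the dependency labels inlined as comments:

buffer_keys = ['states', 'actions']        # stand-in for self.buffer_keys
_data = [[0.0], [1.0]]                     # stand-in for self.table.read(...): line 325 analogue, defines '_data'
buffered_data = {}                         # line 324 analogue: defines 'buffered_data'
for index, key in enumerate(buffer_keys):  # line 326 analogue: the loop defines 'index' and 'key'
    # line 327 analogue (reason category: Loop Body).
    # 'buffered_data' and '_data' are Variable Short-Range uses of nearby definitions,
    # while 'key' and 'index' are Variable Loop Short-Range uses of the loop variables.
    buffered_data[key] = _data[index]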
completion_python
RL_Motion_Planning
338
338
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:']
[' self.clear_buffer()']
[' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = 
tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 338}]
Variable 'self' used at line 338 is defined at line 332 and has a Short-Range dependency.
{'If Body': 1}
{'Variable Short-Range': 1}
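The two annotation fields above encode, respectively, the structural reason for the completion point and the dependency horizon of the names it uses: the target line `self.clear_buffer()` (line 338 of the record's source script) sits in the body of `if clear_buffer:`, and the only name it reads, `self`, is bound a few lines earlier in the `load_data_into_buffer` signature (line 332), hence "Variable Short-Range". The sketch below is an illustrative, stripped-down reconstruction of just that method, not the full ReplayBufferTf class from the record; the class name and the elided loading logic are placeholders.

# Minimal sketch illustrating the record's annotations (hypothetical class name,
# remaining loading logic elided); it mirrors only the shape of the original method.
class ReplayBufferSketch:
    def __init__(self):
        self.current_size = 0  # number of stored episodes

    def clear_buffer(self):
        self.current_size = 0

    def load_data_into_buffer(self, buffered_data=None, clear_buffer=True):  # `self` bound here (line 332 in the record)
        if buffered_data is None:
            raise ValueError("No buffered_data provided")
        if clear_buffer:              # reason category: the completion lies in this If Body
            self.clear_buffer()       # `self` used here (line 338): a Variable Short-Range dependency
        # (loading of buffered_data elided in this sketch)

Instantiating the sketch and calling load_data_into_buffer(buffered_data={}, clear_buffer=True) exercises exactly the annotated path.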
completion_python
RL_Motion_Planning
343
346
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load']
[" idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)']
[' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
reason_categories_output: [{'reason_category': 'If Body', 'usage_line': 343}, {'reason_category': 'If Body', 'usage_line': 344}, {'reason_category': 'Define Stop Criteria', 'usage_line': 345}, {'reason_category': 'If Body', 'usage_line': 345}, {'reason_category': 'Loop Body', 'usage_line': 346}, {'reason_category': 'If Body', 'usage_line': 346}]
horizon_categories_output: Library 'np' used at line 343 is imported at line 19 and has a Long-Range dependency. Variable 'buffered_data' used at line 343 is defined at line 332 and has a Medium-Range dependency. Variable 'num_demos_to_load' used at line 343 is defined at line 332 and has a Medium-Range dependency. Variable 'buffered_data' used at line 345 is defined at line 332 and has a Medium-Range dependency. Variable 'buffered_data' used at line 346 is defined at line 332 and has a Medium-Range dependency. Variable 'key' used at line 346 is part of a Loop defined at line 345 and has a Short-Range dependency. Library 'tf' used at line 346 is imported at line 20 and has a Long-Range dependency. Variable 'idxs' used at line 346 is defined at line 343 and has a Short-Range dependency.
reason_freq_analysis: {'If Body': 4, 'Define Stop Criteria': 1, 'Loop Body': 1}
horizon_freq_analysis: {'Library Long-Range': 2, 'Variable Medium-Range': 4, 'Variable Loop Short-Range': 1, 'Variable Short-Range': 1}
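For orientation, here is a minimal, runnable sketch of the span that the categories and dependencies above describe (lines 343-346 of the target script, as quoted in this row's before/between fields). The toy tensors and the demo count are hypothetical stand-ins; only the three numbered lines come from the row itself.

import numpy as np
import tensorflow as tf

# Hypothetical stand-ins for the surrounding state of load_data_into_buffer
buffered_data = {'actions': tf.zeros((5, 3)), 'states': tf.zeros((5, 4))}
num_demos_to_load = 2

# line 343: randomly choose which demos to keep (uses np, buffered_data, num_demos_to_load)
idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()
# line 345: loop over every stored key
for key in buffered_data.keys():
    # line 346: gather only the sampled demos (uses tf and idxs)
    buffered_data[key] = tf.gather(buffered_data[key], idxs)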
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 346
end_line: 346
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():']
[' buffered_data[key] = tf.gather(buffered_data[key], idxs)']
[' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? 
]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
reason_categories_output: [{'reason_category': 'Loop Body', 'usage_line': 346}, {'reason_category': 'If Body', 'usage_line': 346}]
horizon_categories_output: Variable 'buffered_data' used at line 346 is defined at line 332 and has a Medium-Range dependency. Variable 'key' used at line 346 is part of a Loop defined at line 345 and has a Short-Range dependency. Library 'tf' used at line 346 is imported at line 20 and has a Long-Range dependency. Variable 'idxs' used at line 346 is defined at line 343 and has a Short-Range dependency.
reason_freq_analysis: {'Loop Body': 1, 'If Body': 1}
horizon_freq_analysis: {'Variable Medium-Range': 1, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1, 'Variable Short-Range': 1}
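A note on how each *_freq_analysis field relates to the *_categories_output field above it: the frequency dict appears to be a plain count of the labels in the corresponding categories output. Below is a minimal sketch of that relationship using this row's values; the derivation itself is an assumption made for illustration, not code taken from the dataset.

from collections import Counter

reason_categories_output = [
    {'reason_category': 'Loop Body', 'usage_line': 346},
    {'reason_category': 'If Body', 'usage_line': 346},
]
# Count how often each reason_category label occurs
reason_freq_analysis = dict(Counter(entry['reason_category'] for entry in reason_categories_output))
print(reason_freq_analysis)  # -> {'Loop Body': 1, 'If Body': 1}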
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 395
end_line: 398
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution']
[' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs']
[' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = 
tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], 
dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = 
self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " 
parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Library 'tf' used at line 395 is imported at line 20 and has a Long-Range dependency. Variable 'actions' used at line 395 is defined at line 392 and has a Short-Range dependency. Variable 'mu' used at line 395 is defined at line 388 and has a Short-Range dependency. Variable 'std' used at line 395 is defined at line 390 and has a Short-Range dependency. Variable 'self' used at line 395 is defined at line 378 and has a Medium-Range dependency. Library 'tf' used at line 396 is imported at line 20 and has a Long-Range dependency. Variable 'log_probs' used at line 397 is defined at line 396 and has a Short-Range dependency. Variable 'log_probs' used at line 398 is defined at line 396 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 5, 'Variable Medium-Range': 1}
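The horizon_categories_output dict above is a count of how many dependencies in the preceding description fall into each (kind, range) category. A minimal sketch of how such counts could be aggregated from per-dependency records follows; the record format and the Counter-based aggregation are illustrative assumptions, not the dataset's actual pipeline.

# Illustrative sketch only (not the dataset's actual pipeline): aggregate
# per-dependency (kind, range) records into a category-count dict like the
# horizon_categories_output shown above. The records mirror the dependencies
# listed for the lines 395-398 completion; the record format is assumed.
from collections import Counter

dependencies = [
    ("Library", "Long-Range"),    # tf used at line 395, imported at line 20
    ("Variable", "Short-Range"),  # actions used at line 395, defined at line 392
    ("Variable", "Short-Range"),  # mu used at line 395, defined at line 388
    ("Variable", "Short-Range"),  # std used at line 395, defined at line 390
    ("Variable", "Medium-Range"), # self used at line 395, defined at line 378
    ("Library", "Long-Range"),    # tf used at line 396, imported at line 20
    ("Variable", "Short-Range"),  # log_probs used at line 397, defined at line 396
    ("Variable", "Short-Range"),  # log_probs used at line 398, defined at line 396
]

horizon_categories = dict(Counter(f"{kind} {rng}" for kind, rng in dependencies))
print(horizon_categories)
# -> {'Library Long-Range': 2, 'Variable Short-Range': 5, 'Variable Medium-Range': 1}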
completion_python
RL_Motion_Planning
415
415
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution']
[' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)']
[' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 415}]
Library 'tf' used at line 415 is imported at line 20 and has a Long-Range dependency. Variable 'mu' used at line 415 is defined at line 411 and has a Short-Range dependency. Variable 'self' used at line 415 is defined at line 400 and has a Medium-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1, 'Variable Medium-Range': 1}
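In the record above, horizon_categories_output labels each dependency by the distance between the line where a name is used and the line where it was defined or imported, and horizon_freq_analysis simply counts those labels. The short sketch below reproduces that bookkeeping for the three dependencies listed; the numeric cut-offs for Short-, Medium- and Long-Range are illustrative assumptions, not values stated anywhere in this file.

from collections import Counter

def classify_range(usage_line: int, def_line: int) -> str:
    # Assumed thresholds -- the file never states the exact boundaries.
    distance = usage_line - def_line
    if distance <= 5:
        return 'Short-Range'
    if distance <= 30:
        return 'Medium-Range'
    return 'Long-Range'

# Dependencies copied from the record above: (kind, usage_line, definition_line)
deps = [('Library', 415, 20), ('Variable', 415, 411), ('Variable', 415, 400)]

labels = ['{} {}'.format(kind, classify_range(use, d)) for kind, use, d in deps]
print(dict(Counter(labels)))
# -> {'Library Long-Range': 1, 'Variable Short-Range': 1, 'Variable Medium-Range': 1}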
completion_python
RL_Motion_Planning
417
417
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:']
[' actions = mu']
[' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Else Reasoning', 'usage_line': 417}]
Variable 'mu' used at line 417 is defined at line 411 and has a Short-Range dependency.
{'Else Reasoning': 1}
{'Variable Short-Range': 1}
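The Else Reasoning and Variable Short-Range annotations above refer to the `actions = mu` branch of the Actor's call method in the source listing carried by this record's before field. A minimal sketch of that train/eval branching pattern follows; it is illustrative only, and the standalone helper name select_actions is an assumption, not part of the record or its ground-truth completion.

import tensorflow as tf

FIXED_STD = 0.05  # fixed exploration noise, mirroring the Actor listing

def select_actions(mu: tf.Tensor, train: bool) -> tf.Tensor:
    """Sample around the policy mean during training; return the mean otherwise."""
    if train:
        # training: stochastic actions drawn around mu with a fixed standard deviation
        return tf.random.normal(shape=tf.shape(mu), mean=mu, stddev=FIXED_STD)
    # evaluation: the deterministic branch the annotation points at (actions = mu)
    return mu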
completion_python
RL_Motion_Planning
480
480
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:']
[' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)']
[' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", 
' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def 
load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, 
default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. 
ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 480}]
Library 'tf' used at line 480 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 480 is defined at line 466 and has a Medium-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 1}
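The If Body annotation and the Library Long-Range / Variable Medium-Range dependencies above correspond to the explore branch completed in this record's between field (line 480 of the source listing). Below is a minimal sketch of the surrounding epsilon-greedy action selection, assuming a generic actor callable and scalar bounds; the function name epsilon_greedy_action is illustrative and not taken from the record.

import tensorflow as tf

def epsilon_greedy_action(actor, state, goal, epsilon, stddev, a_dim, action_max):
    """Pick a uniform random action with probability epsilon, else perturb the policy mean."""
    if tf.random.uniform(()) < epsilon:
        # Explore: uniform action in [-action_max, action_max] (the completed line)
        action = tf.random.uniform((1, a_dim), -action_max, action_max)
    else:
        # Exploit: policy mean plus Gaussian noise, clipped to the valid action range
        action_mu, _, _ = actor(tf.concat([state, goal], axis=1))
        action = action_mu + tf.random.normal(tf.shape(action_mu), mean=0.0, stddev=stddev)
        action = tf.clip_by_value(action, -action_max, action_max)
    return action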
completion_python
RL_Motion_Planning
483
486
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:']
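The hand-written Gaussian log-density in Actor.get_log_prob above can be cross-checked against tensorflow_probability. A minimal sketch, assuming the same FIXED_STD = 0.05 and made-up mu/actions tensors (illustrative values only, not taken from this record):

```python
# Sanity check (illustrative values): the manual log N(a | mu, std) used by
# Actor.get_log_prob should match tfp.distributions.Normal for a fixed std.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

FIXED_STD = 0.05                                               # same constant as the Actor above
mu = tf.constant([[0.1, -0.3, 0.5, 0.0]], dtype=tf.float32)    # assumed policy mean
actions = tf.constant([[0.12, -0.28, 0.49, 0.02]], dtype=tf.float32)
std = tf.ones_like(mu) * FIXED_STD

# Hand-written log N(a | mu, std), summed over the action dimensions
log_two_pi = tf.math.log(tf.constant(2.0 * np.pi, dtype=tf.float32))
manual = tf.reduce_sum(
    -0.5 * tf.square((actions - mu) / std) - 0.5 * log_two_pi - tf.math.log(std),
    axis=1,
)

# Reference value from tensorflow_probability
reference = tf.reduce_sum(
    tfp.distributions.Normal(loc=mu, scale=std).log_prob(actions), axis=1)

print(manual.numpy(), reference.numpy())  # the two agree up to float error
```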
[' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)']
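The sampled completion above fills in the exploit branch of BC.act: query the actor for its mean action, add zero-mean Gaussian noise with the given stddev, and clip to the action bounds. A standalone sketch of that explore/exploit selection, where a_dim, action_max, epsilon and stddev are illustrative assumptions rather than values read from args:

```python
# Minimal sketch of epsilon-greedy action selection with additive Gaussian noise.
# a_dim, action_max, epsilon and stddev are illustrative assumptions.
import tensorflow as tf

a_dim, action_max = 4, 1.0
epsilon, stddev = 0.1, 0.05

def select_action(action_mu: tf.Tensor) -> tf.Tensor:
    if tf.random.uniform(()) < epsilon:
        # Explore: uniform random action within the bounds
        action = tf.random.uniform((1, a_dim), -action_max, action_max)
    else:
        # Exploit: policy mean plus zero-mean Gaussian noise
        action = action_mu + tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)
    return tf.clip_by_value(action, -action_max, action_max)

print(select_action(tf.zeros((1, a_dim))))
```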
[' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = 
tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = 
self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal 
position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
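Taken together, BC.train above is a plain behaviour-cloning objective: the squared difference between dataset actions and the actor's mean output, summed over action dimensions, averaged over the batch, and augmented with the orthogonal-regularization penalty on the actor's Dense kernels. A minimal sketch of that objective, assuming a tiny stand-in network and random tensors in place of the script's real dimensions:

```python
# Sketch of the behaviour-cloning loss with orthogonal regularization.
# Network sizes, batch size and input dims are illustrative assumptions.
import tensorflow as tf
from keras.layers import Dense

actor_base = tf.keras.Sequential([Dense(8, activation=tf.nn.relu), Dense(4)])

states_goals = tf.random.normal((16, 13))      # assumed concat of state and goal
dataset_actions = tf.random.normal((16, 4))

def bc_loss(reg_coef: float = 1e-4) -> tf.Tensor:
    mu = tf.nn.tanh(actor_base(states_goals))                        # policy mean
    pi_loss = tf.reduce_mean(
        tf.reduce_sum(tf.math.squared_difference(dataset_actions, mu), axis=-1))
    # Orthogonal regularization: ||W^T W * (1 - I)||_F^2 over the Dense kernels
    reg = 0.0
    for layer in actor_base.layers:
        if isinstance(layer, Dense):
            prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)
            reg += tf.reduce_sum(tf.square(prod * (1.0 - tf.eye(prod.shape[0]))))
    return pi_loss + reg_coef * reg

print(bc_loss())
```

Since the actor's output already passes through tanh, the subsequent clip to [-7, 7] in the original code is effectively a no-op, so the sketch keeps only the tanh.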
[{'reason_category': 'Else Reasoning', 'usage_line': 483}, {'reason_category': 'Else Reasoning', 'usage_line': 484}, {'reason_category': 'Else Reasoning', 'usage_line': 485}, {'reason_category': 'Else Reasoning', 'usage_line': 486}]
Variable 'self' used at line 483 is defined at line 466 and has a Medium-Range dependency. Library 'tf' used at line 483 is imported at line 20 and has a Long-Range dependency. Variable 'state' used at line 483 is defined at line 467 and has a Medium-Range dependency. Variable 'curr_goal' used at line 483 is defined at line 472 and has a Medium-Range dependency. Library 'tf' used at line 484 is imported at line 20 and has a Long-Range dependency. Variable 'action_mu' used at line 484 is defined at line 483 and has a Short-Range dependency. Variable 'stddev' used at line 484 is defined at line 466 and has a Medium-Range dependency. Variable 'action_mu' used at line 485 is defined at line 483 and has a Short-Range dependency. Variable 'action_dev' used at line 485 is defined at line 484 and has a Short-Range dependency. Library 'tf' used at line 486 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 486 is defined at line 466 and has a Medium-Range dependency. Variable 'action' used at line 486 is defined at line 485 and has a Short-Range dependency.
{'Else Reasoning': 4}
{'Variable Medium-Range': 5, 'Library Long-Range': 3, 'Variable Short-Range': 4}
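The two frequency summaries above appear to be simple counts over the per-usage annotations: the reasoning categories collapse to {'Else Reasoning': 4}, and tallying each "<dependency type> <range>" pair from the horizon sentences reproduces {'Variable Medium-Range': 5, 'Library Long-Range': 3, 'Variable Short-Range': 4}. A small sketch of that aggregation (variable names are assumptions; only the counted values are taken from the record):

```python
# Sketch (assumed field layout): deriving the frequency summaries from the
# per-usage category annotations shown above.
from collections import Counter

reason_annotations = [
    {'reason_category': 'Else Reasoning', 'usage_line': 483},
    {'reason_category': 'Else Reasoning', 'usage_line': 484},
    {'reason_category': 'Else Reasoning', 'usage_line': 485},
    {'reason_category': 'Else Reasoning', 'usage_line': 486},
]
reason_freq = Counter(a['reason_category'] for a in reason_annotations)
print(dict(reason_freq))  # {'Else Reasoning': 4}

horizon_annotations = [
    ('Variable', 'Medium-Range'), ('Library', 'Long-Range'), ('Variable', 'Medium-Range'),
    ('Variable', 'Medium-Range'), ('Library', 'Long-Range'), ('Variable', 'Short-Range'),
    ('Variable', 'Medium-Range'), ('Variable', 'Short-Range'), ('Variable', 'Short-Range'),
    ('Library', 'Long-Range'), ('Variable', 'Medium-Range'), ('Variable', 'Short-Range'),
]
horizon_freq = Counter(f'{kind} {rng}' for kind, rng in horizon_annotations)
print(dict(horizon_freq))
# {'Variable Medium-Range': 5, 'Library Long-Range': 3, 'Variable Short-Range': 4}
```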
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 501
end_line: 502
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """']
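This record's context also includes the body of sample_random_transitions, which draws the indices used for unsegmented HER-style relabelling: a current timestep is picked as a random fraction of each episode's terminate index, and a future timestep is picked uniformly between that step and the terminate index. A toy sketch of just the index arithmetic, with made-up terminate indices (the script derives the real ones from the success flags):

```python
# Toy sketch of the current/future timestep sampling used for HER-style goal relabelling.
# terminate_idxes values are illustrative; the script computes them from 'successes'.
import tensorflow as tf

terminate_idxes = tf.constant([9, 14, 7, 20, 11], dtype=tf.int64)  # assumed per-episode end steps
batch_size = 5                                                     # matches the toy episode count

# Current timestep: random fraction of the episode length, rounded to an integer index
t_frac = tf.random.uniform((batch_size,), dtype=tf.float64)
t_samples = tf.cast(tf.round(t_frac * tf.cast(terminate_idxes, tf.float64)), tf.int64)

# Future timestep: somewhere between the current step and the terminate index
future_frac = tf.random.uniform((batch_size,), dtype=tf.float64)
future_offset = tf.cast(future_frac * tf.cast(terminate_idxes - t_samples, tf.float64), tf.int64)
t_samples_future = t_samples + future_offset

print(t_samples.numpy(), t_samples_future.numpy())  # future index never exceeds the terminate index
```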
[' skill = tf.zeros((1, self.args.c_dim))', ' return skill']
[' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Library 'tf' used at line 501 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 501 is defined at line 497 and has a Short-Range dependency. Variable 'skill' used at line 502 is defined at line 501 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
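For reference, the dependency annotation above matches the two-line body of get_init_skill that is visible in this record's surrounding code context. The following is a minimal sketch of that span, reconstructed from that context and not part of the record itself; it assumes the target lines are exactly the ones shown there, with tf imported at the top of the file and self.args set in BC.__init__:

    def get_init_skill(self):
        """
        demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)
        """
        # 'tf' is the module imported near the top of the file (Library Long-Range dependency);
        # 'self' refers to the enclosing method definition (Variable Short-Range dependency).
        skill = tf.zeros((1, self.args.c_dim))
        # 'skill', defined on the previous line, is returned here (Variable Short-Range dependency).
        return skill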
completion_python
RL_Motion_Planning
506
506
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):']
[' return g_env']
[' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'g_env' used at line 506 is defined at line 505 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
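Similarly, the single Short-Range dependency recorded here corresponds to the one-line ground-truth completion "return g_env" inside get_init_goal. A minimal sketch of that method, reconstructed from the record's before/after context and assuming the signature shown there:

    @staticmethod
    def get_init_goal(init_state, g_env):
        # 'g_env' is the parameter declared in the signature on the previous line
        # (Variable Short-Range dependency); the environment goal is returned
        # unchanged as the initial goal.
        return g_env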
completion_python
RL_Motion_Planning
510
510
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill']
[' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))']
[' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 510 is defined at line 508 and has a Short-Range dependency. Library 'tf' used at line 510 is imported at line 20 and has a Long-Range dependency. Library 'np' used at line 510 is imported at line 19 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Library Long-Range': 2}
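The horizon analysis above classifies each symbol on the completed statement (line 510 of the source file, the single line shown in this record's between field) by the distance to its definition: self is bound by the enclosing def build_model(self): two lines earlier (Short-Range), while tf and np resolve to module-level imports near the top of the file (Long-Range). The sketch below is not part of the record; it merely restates that statement in a minimal, self-contained form, with comments marking which kind of dependency each name is. The class skeletons and dimension values are placeholders chosen only for illustration.

# Minimal, self-contained restatement of the completed line (illustration only).
import numpy as np                      # Long-Range: module-level import
import tensorflow as tf                 # Long-Range: module-level import
from tensorflow.keras.layers import Dense


class Actor(tf.keras.Model):
    def __init__(self, action_dim: int):
        super().__init__()
        self.base = tf.keras.Sequential([Dense(64, activation='relu'), Dense(action_dim)])

    def call(self, states, training=None, mask=None):
        return self.base(states)


class BC(tf.keras.Model):
    def __init__(self, s_dim: int = 10, g_dim: int = 3, a_dim: int = 4):
        super().__init__()
        self.s_dim, self.g_dim = s_dim, g_dim   # placeholder dimensions
        self.actor = Actor(a_dim)

    def build_model(self):                      # Short-Range: `self` is bound by this signature
        # The completed statement: one dummy forward pass so the actor creates its weights.
        _ = self.actor(tf.concat([np.ones([1, self.s_dim]), np.ones([1, self.g_dim])], 1))


if __name__ == '__main__':
    model = BC()
    model.build_model()
    print('actor weights built:', len(model.actor.get_weights()) > 0)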
completion_python
RL_Motion_Planning
513
513
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):']
[' self.actor.save_weights(dir_param + "/policy.h5")']
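The ground-truth completion for this record (line 513) is the body of BC.save_, which persists the actor weights with Keras's save_weights; the matching load_ shown a few entries later in the after field restores them from the same "/policy.h5" path, which only works once the network has been built (the record's BC.build_model does exactly that with a dummy forward pass). Below is a minimal sketch of that save/load round-trip; the stand-in network, the temporary directory, and the allclose check are assumptions for illustration, not part of the record.

# Illustrative weight save/load round-trip in HDF5 format (assumed stand-in model).
import os
import tempfile

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense


def make_actor(action_dim: int = 4) -> tf.keras.Sequential:
    # Stand-in for the actor network; layer sizes are placeholders.
    return tf.keras.Sequential([Dense(32, activation='relu'), Dense(action_dim)])


if __name__ == '__main__':
    dir_param = tempfile.mkdtemp()
    states = np.ones((1, 13), dtype=np.float32)

    actor = make_actor()
    before = actor(states).numpy()                               # forward pass builds the weights
    actor.save_weights(os.path.join(dir_param, 'policy.h5'))     # mirrors BC.save_

    restored = make_actor()
    _ = restored(states)                                         # must be built before loading .h5 weights
    restored.load_weights(os.path.join(dir_param, 'policy.h5'))  # mirrors BC.load_

    print('weights restored:', np.allclose(before, restored(states).numpy()))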
[' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 513 is defined at line 512 and has a Short-Range dependency. Variable 'dir_param' used at line 513 is defined at line 512 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
completion_python
RL_Motion_Planning
516
516
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):']
[' self.actor.load_weights(dir_param + "/policy.h5")']
[' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 516 is defined at line 515 and has a Short-Range dependency. Variable 'dir_param' used at line 516 is defined at line 515 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
completion_python
RL_Motion_Planning
547
547
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):']
[' os.makedirs(args.dir_summary)']
[' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 547}]
Library 'os' used at line 547 is imported at line 8 and has a Long-Range dependency. Variable 'args' used at line 547 is defined at line 526 and has a Medium-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 1}
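The range labels in these dependency sentences track the distance between the line where a name is used and the line where it is defined or imported: 'os' imported at line 8 and used at line 547 is Long-Range, while 'args' defined at line 526 and used at line 547 is Medium-Range. A rough sketch of such a bucketing is below; the cutoff values are assumptions chosen only so the examples in this record come out right, not thresholds stated anywhere in this data:

    def range_label(def_line, use_line, short_max=10, medium_max=30):
        # Bucket a dependency by how far the usage line is from the definition/import line.
        # short_max and medium_max are assumed cutoffs, not taken from the dataset.
        distance = use_line - def_line
        if distance <= short_max:
            return 'Short-Range'
        elif distance <= medium_max:
            return 'Medium-Range'
        return 'Long-Range'

    print(range_label(8, 547))    # 'Long-Range'  (library 'os')
    print(range_label(526, 547))  # 'Medium-Range' (variable 'args')
    print(range_label(515, 516))  # 'Short-Range'  (variable 'dir_param')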
completion_python
RL_Motion_Planning
550
550
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):']
[' os.makedirs(args.dir_plot)']
[' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 550}]
Library 'os' used at line 550 is imported at line 8 and has a Long-Range dependency. Variable 'args' used at line 550 is defined at line 526 and has a Medium-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 1}
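The dependency summary above labels each usage by how far it sits from its definition: 'args' defined at line 526 and used at line 550 counts as Medium-Range, while 'os' imported at line 8 counts as Long-Range. A minimal sketch of how such a label could be derived from the two line numbers follows; the helper name and the roughly 30-line boundary are assumptions inferred from the examples in these records, not something the records state.

def dependency_range(def_line: int, use_line: int) -> str:
    # Distance between where a name is defined/imported and where it is used.
    distance = use_line - def_line
    # Assumed boundary, inferred from the records here:
    # 550 - 526 = 24 -> Medium-Range, 557 - 526 = 31 -> Long-Range, 550 - 8 = 542 -> Long-Range.
    return 'Medium-Range' if distance <= 30 else 'Long-Range'

print(dependency_range(526, 550))  # Medium-Range
print(dependency_range(8, 550))    # Long-Range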
completion_python
RL_Motion_Planning
555
563
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:']
[' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()']
[' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', 
' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " 
parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
reason_categories_output: [{'reason_category': 'If Body', 'usage_line': 555}, {'reason_category': 'If Body', 'usage_line': 556}, {'reason_category': 'If Body', 'usage_line': 557}, {'reason_category': 'If Body', 'usage_line': 558}, {'reason_category': 'If Body', 'usage_line': 559}, {'reason_category': 'If Body', 'usage_line': 560}, {'reason_category': 'If Body', 'usage_line': 561}, {'reason_category': 'If Body', 'usage_line': 562}, {'reason_category': 'If Body', 'usage_line': 563}]
horizon_categories_output: Variable 'self' used at line 555 is defined at line 526 and has a Medium-Range dependency. Library 'wandb' used at line 555 is imported at line 22 and has a Long-Range dependency. Variable 'args' used at line 556 is defined at line 526 and has a Medium-Range dependency. Variable 'args' used at line 557 is defined at line 526 and has a Long-Range dependency. Variable 'algo' used at line 558 is defined at line 526 and has a Long-Range dependency. Variable 'current_time' used at line 558 is defined at line 34 and has a Long-Range dependency. Library 'tf' used at line 562 is imported at line 20 and has a Long-Range dependency. Library 'tf' used at line 563 is imported at line 20 and has a Long-Range dependency.
reason_freq_analysis: {'If Body': 9}
horizon_freq_analysis: {'Variable Medium-Range': 2, 'Library Long-Range': 3, 'Variable Long-Range': 3}
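For each record, the two *_freq_analysis fields are per-category counts over the corresponding *_categories_output fields: the nine 'If Body' entries above collapse to {'If Body': 9}, and the eight dependency sentences collapse to the three-key dict. A minimal sketch of that aggregation, assuming the record is already parsed into Python objects; the helper names and the regex are illustrative and not part of any published tooling for this dataset:

```python
from collections import Counter
import re

def reason_freq(reason_categories_output):
    # e.g. nine {'reason_category': 'If Body', ...} entries collapse to {'If Body': 9}
    return dict(Counter(entry['reason_category'] for entry in reason_categories_output))

def horizon_freq(horizon_categories_output):
    # The horizon field is prose; each sentence names a symbol kind (only Library and
    # Variable appear in these records) and a *-Range label.
    pattern = re.compile(
        r"(Library|Variable) '[^']*' used at line \d+ is (?:defined|imported) "
        r"at line \d+ and has a (\w+-Range) dependency"
    )
    return dict(Counter(f"{kind} {rng}" for kind, rng in pattern.findall(horizon_categories_output)))

# Example mirroring the record above:
reasons = [{'reason_category': 'If Body', 'usage_line': line} for line in range(555, 564)]
print(reason_freq(reasons))  # {'If Body': 9}
prose = ("Variable 'self' used at line 555 is defined at line 526 and has a Medium-Range dependency. "
         "Library 'wandb' used at line 555 is imported at line 22 and has a Long-Range dependency.")
print(horizon_freq(prose))   # {'Variable Medium-Range': 1, 'Library Long-Range': 1}
```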
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 566
end_line: 567
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):']
between: ['        item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', '        return item']
[' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, 
self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " 
choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
reason_categories_output: []
horizon_categories_output: Library 'tf' used at line 566 is imported at line 20 and has a Long-Range dependency. Variable 'self' used at line 566 is defined at line 565 and has a Short-Range dependency. Variable 'item' used at line 567 is defined at line 566 and has a Short-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Library Long-Range': 1, 'Variable Short-Range': 2}
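Across both records, the *-Range label in horizon_categories_output tracks the gap between a symbol's use line and its definition or import line: a gap of one line is Short-Range, a gap of 30 (args used at line 556, defined at 526) is Medium-Range, and gaps of 31 or more are Long-Range. A small sketch of one plausible bucketing; the Short/Medium cutoff of 10 below is purely an assumption, and the Medium/Long cutoff of 30 is only what these two records suggest:

```python
def dependency_range(def_line: int, use_line: int,
                     short_max: int = 10, medium_max: int = 30) -> str:
    """Bucket a def/use line gap into the Short-/Medium-/Long-Range labels used above.

    short_max = 10 is a guess (no record here pins it down); medium_max = 30 matches
    these records, where a gap of 30 is Medium-Range and a gap of 31 is Long-Range.
    """
    gap = abs(use_line - def_line)
    if gap <= short_max:
        return 'Short-Range'
    if gap <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

# Examples mirroring the records above:
print(dependency_range(565, 566))  # Short-Range  ('self' defined one line earlier)
print(dependency_range(526, 556))  # Medium-Range (gap of exactly 30)
print(dependency_range(20, 566))   # Long-Range   ('tf' imported near the top of the file)
```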
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 571
end_line: 571
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):']
[' os.makedirs(dir_param)']
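The one-line completion above supplies the body of the 'if not os.path.exists(dir_param):' guard that closes the preceding code listing, i.e. the directory is created only when it is missing. As a hedged aside, the same behaviour can be had without the explicit existence check; the sketch below is illustrative only, and the 'ensure_dir' name is not part of the script.

import os

def ensure_dir(dir_param: str) -> None:
    # Same effect as the guarded call above: create the directory tree if it
    # is missing; exist_ok=True makes repeated calls a no-op.
    os.makedirs(dir_param, exist_ok=True)

Either form works; the guarded two-line version in the script simply makes the intent explicit.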
[' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', 
' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, 
default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
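The listing above ends by constructing the expert and offline replay buffers, loading the pickled demonstrations, and training the BC agent. Its training step fits the actor with a plain regression objective: the squared difference between the actor's mean action and the demonstrated action, summed over action dimensions and averaged over the batch, with an orthogonality penalty added before the gradient step. The sketch below restates just that objective in isolation; the function and argument names are illustrative, and the penalty is taken as a precomputed scalar rather than recomputed here.

import tensorflow as tf

def bc_objective(pred_mean_actions, demo_actions, ortho_penalty=0.0):
    # Squared error summed over the action dimension, then averaged over the batch.
    per_sample = tf.reduce_sum(
        tf.math.squared_difference(demo_actions, pred_mean_actions), axis=-1)
    pi_loss = tf.reduce_mean(per_sample)
    # The gradient step then uses this sum: regression loss plus the
    # (precomputed) orthogonal regularisation penalty on the actor's Dense kernels.
    return pi_loss + ortho_penalty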
[{'reason_category': 'If Body', 'usage_line': 571}]
Library 'os', used at line 571, is imported at line 8 and has a Long-Range dependency. Variable 'dir_param', used at line 571, is defined at line 569 and has a Short-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
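Taken together, the four annotation fields above describe the completed line (line 571 of the script, the 'os.makedirs(dir_param)' call): it sits inside an if-body, it uses the library 'os' imported much earlier at line 8 (a Long-Range dependency), and it uses the variable 'dir_param' defined two lines above at line 569 (a Short-Range dependency); the two dictionaries are frequency counts over those labels. The sketch below shows one way such counts can be reproduced; the labels are copied from the record, while restating them as plain strings is a convenience of the example, not the record's own format.

from collections import Counter

# The single reason label and the two horizon labels from the record above.
reason_categories = ['If Body']
horizon_categories = ['Library Long-Range', 'Variable Short-Range']

# Reproduces the two frequency dictionaries shown above.
print(dict(Counter(reason_categories)))   # {'If Body': 1}
print(dict(Counter(horizon_categories)))  # {'Library Long-Range': 1, 'Variable Short-Range': 1}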
completion_python
RL_Motion_Planning
575
575
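The four short values above open a second example of the same shape: another Python line-completion drawn from the same RL motion-planning script, this time with both span markers equal to 575, i.e. a single masked line. Assuming the two integers denote the first and last line of the masked span, 1-indexed and inclusive, the selection works as in the toy sketch below; the convention itself is an assumption about the record layout, not something stated in the record.

# Purely illustrative: the assumed 1-indexed, inclusive span convention.
source_lines = [
    '    def load_model(self, dir_param):',
    '        self.model.load_(dir_param)',
]
span_start, span_end = 2, 2  # stand-ins for the two 575 values above
masked_span = source_lines[span_start - 1:span_end]
print(masked_span)  # ['        self.model.load_(dir_param)']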
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):']
[' self.model.load_(dir_param)']
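This second one-line completion fills in the body of 'load_model', which simply forwards to the BC model's 'load_' method, mirroring how 'save_model' forwards to 'save_'; underneath, both calls reduce to Keras weight (de)serialisation on the actor. The round trip below is a minimal sketch of that: the 'policy.h5' filename mirrors the script, while the layer sizes, dummy input and directory name are made up for the example.

import os
import numpy as np
import tensorflow as tf

# Stand-in actor network; sizes and the dummy input are illustrative only.
actor = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(4),
])
_ = actor(np.ones((1, 6), dtype=np.float32))  # build the weights once

dir_param = './models_demo'
os.makedirs(dir_param, exist_ok=True)
actor.save_weights(dir_param + '/policy.h5')  # what save_ / save_model reduce to
actor.load_weights(dir_param + '/policy.h5')  # what load_ / load_model reduce to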
[' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' 
avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " 
parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 575 is defined at line 574 and has a Short-Range dependency. Variable 'dir_param' used at line 575 is defined at line 574 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
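Both annotations in this row resolve to 'Variable Short-Range' because 'self' and 'dir_param' are bound on the def load_actor(self, dir_param): line (574) and consumed on the very next line (575). The short sketch below is only an illustration of how such a definition-to-use distance could be bucketed; the bucket names other than Short-Range and the numeric thresholds are assumptions, not taken from this dataset.

def classify_dependency(def_line: int, use_line: int) -> str:
    # Hypothetical thresholds -- the row above only exhibits the Short-Range label.
    distance = use_line - def_line
    if distance <= 10:
        return "Variable Short-Range"
    if distance <= 50:
        return "Variable Medium-Range"  # assumed bucket name
    return "Variable Long-Range"        # assumed bucket name

# 'self' and 'dir_param' are both defined at line 574 and used at line 575,
# giving the two Short-Range hits summarised as {'Variable Short-Range': 2}.
assert classify_dependency(574, 575) == "Variable Short-Range"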
completion_python
RL_Motion_Planning
590
590
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:']
[" trans['goals'] = trans['her_goals']"]
[' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + 
"/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', 
choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. 
ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 590}]
Variable 'trans' used at line 590 is defined at line 587 and has a Short-Range dependency.
{'If Body': 1}
{'Variable Short-Range': 1}
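For this row the ground-truth completion (the `between` field) is the single statement trans['goals'] = trans['her_goals'], tagged 'If Body' because it sits inside the if self.model.use_her: branch of process_data, with 'trans' assigned a few source lines earlier (587 vs. 590). The following self-contained sketch reproduces that pattern with plain dicts and an assumed helper name in place of the row's TensorFlow context; it is an illustration of the annotation, not the dataset's own tooling.

def pick_goals(transitions: dict, use_her: bool) -> dict:
    # Hypothetical standalone rewrite of the row's process_data excerpt.
    trans = dict(transitions)                 # 'trans' defined shortly before its use (line 587 in the row)
    if use_her:                               # enclosing branch -> reason_category 'If Body'
        trans['goals'] = trans['her_goals']   # the target completion (usage_line 590)
    else:
        trans['goals'] = trans['env_goals']
    return trans

# Example usage with dummy goal values:
print(pick_goals({'her_goals': [0.1], 'env_goals': [0.9]}, use_her=True)['goals'])  # [0.1]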
completion_python
RL_Motion_Planning
592
592
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:']
[" trans['goals'] = trans['env_goals']"]
[' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args 
= self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour 
in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. 
ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Else Reasoning', 'usage_line': 592}]
Variable 'trans' used at line 592 is defined at line 587 and has a Short-Range dependency.
{'Else Reasoning': 1}
{'Variable Short-Range': 1}
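For context, here is a minimal, self-contained sketch (not part of the dataset record) of the pattern the annotations above label: the dictionary `trans` is defined a few lines before it is used inside the `else:` branch of the record's `process_data` method, which is what the 'Else Reasoning' category and the 'Variable Short-Range' dependency refer to. The standalone function signature and toy inputs below are illustrative assumptions, not the dataset's own code.

def process_data(transitions, use_her=False):
    trans = transitions.copy()                  # definition site of 'trans'
    if use_her:
        trans['goals'] = trans['her_goals']
    else:
        trans['goals'] = trans['env_goals']     # usage site inside the else branch (short-range dependency)
    return trans

example = {'her_goals': [0.1, 0.2, 0.3], 'env_goals': [0.4, 0.5, 0.6]}
print(process_data(example)['goals'])           # -> [0.4, 0.5, 0.6]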
completion_python
RL_Motion_Planning
607
607
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():']
[' trans[key] = tf.cast(trans[key], dtype=tf.float32)']
[' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and 
critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. 
Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 607}]
Variable 'trans' used at line 607 is defined at line 603 and has a Short-Range dependency. Variable 'key' used at line 607 is part of a Loop defined at line 606 and has a Short-Range dependency. Library 'tf' used at line 607 is imported at line 20 and has a Long-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
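For context, the sampled completion for this row is the single cast statement inside the `for key in trans.keys():` loop, depending on the `trans` dict defined a few lines earlier and the module-level `import tensorflow as tf`, as the dependency annotation above describes. The following is a minimal, self-contained sketch of that cast-to-float32 pattern; the dict contents are illustrative stand-ins, not values taken from the dataset.

import tensorflow as tf

# Illustrative stand-in for the 'trans' dict assembled by process_data
trans = {
    'states': tf.constant([[0.1, 0.2, 0.3]], dtype=tf.float64),
    'successes': tf.constant([1], dtype=tf.int32),
}

# The sampled completion pattern: cast every entry to tf.float32 inside the loop body
for key in trans.keys():
    trans[key] = tf.cast(trans[key], dtype=tf.float32)

print("trans :", trans)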
completion_python
RL_Motion_Planning
606
609
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32']
[' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans']
[' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' 
self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. 
of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Define Stop Criteria', 'usage_line': 606}, {'reason_category': 'Loop Body', 'usage_line': 607}]
Variable 'trans' used at line 606 is defined at line 603 and has a Short-Range dependency. Variable 'trans' used at line 607 is defined at line 603 and has a Short-Range dependency. Variable 'key' used at line 607 is part of a Loop defined at line 606 and has a Short-Range dependency. Library 'tf' used at line 607 is imported at line 20 and has a Long-Range dependency. Variable 'trans' used at line 608 is defined at line 603 and has a Short-Range dependency. Variable 'trans' used at line 609 is defined at line 603 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 1}
{'Variable Short-Range': 4, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
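The frequency line above is just a per-category count of the entries in the preceding category list (the two usages at lines 606 and 607 collapse to {'Define Stop Criteria': 1, 'Loop Body': 1}). A minimal sketch of that counting step, written here only as an illustration and not as the dataset's actual tooling:

from collections import Counter

def reason_freq(categories):
    # categories: list of dicts like {'reason_category': 'Loop Body', 'usage_line': 607}
    return dict(Counter(entry['reason_category'] for entry in categories))

example = [
    {'reason_category': 'Define Stop Criteria', 'usage_line': 606},
    {'reason_category': 'Loop Body', 'usage_line': 607},
]
print(reason_freq(example))  # -> {'Define Stop Criteria': 1, 'Loop Body': 1}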
completion_python
RL_Motion_Planning
623
627
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:']
[' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()']
[' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the 
model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. 
'", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 623}, {'reason_category': 'Loop Body', 'usage_line': 623}, {'reason_category': 'If Body', 'usage_line': 624}, {'reason_category': 'Loop Body', 'usage_line': 624}, {'reason_category': 'If Body', 'usage_line': 625}, {'reason_category': 'Loop Body', 'usage_line': 625}, {'reason_category': 'If Body', 'usage_line': 626}, {'reason_category': 'Loop Body', 'usage_line': 626}, {'reason_category': 'If Body', 'usage_line': 627}, {'reason_category': 'Loop Body', 'usage_line': 627}]
Variable 'skill' used at line 623 is part of a Loop defined at line 619 and has a Short-Range dependency. Variable 'self' used at line 623 is defined at line 611 and has a Medium-Range dependency. Variable 'skill' used at line 624 is part of a Loop defined at line 619 and has a Short-Range dependency. Library 'tf' used at line 624 is imported at line 20 and has a Long-Range dependency. Variable 'skill' used at line 627 is part of a Loop defined at line 619 and has a Short-Range dependency.
{'If Body': 5, 'Loop Body': 5}
{'Variable Loop Short-Range': 3, 'Variable Medium-Range': 1, 'Library Long-Range': 1}
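The dependency sentences above label each use/definition pair as Short-, Medium- or Long-Range, and the frequency line counts those labels. The exact line-distance cutoffs are not stated anywhere in this dump, so the thresholds in the sketch below (at most 5 lines for short, at most 30 for medium, long otherwise) are assumptions chosen only to be consistent with the gaps visible in these rows (4 lines -> Short-Range, 12 -> Medium-Range, 604 -> Long-Range):

def dependency_range(usage_line, definition_line, short_max=5, medium_max=30):
    # short_max / medium_max are assumed cutoffs, not values taken from the dataset
    gap = abs(usage_line - definition_line)
    if gap <= short_max:
        return 'Short-Range'
    if gap <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

print(dependency_range(623, 611))  # -> Medium-Range, matches the 'self' example above
print(dependency_range(624, 20))   # -> Long-Range, matches the 'tf' example above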
completion_python
RL_Motion_Planning
638
640
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ']
[' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])']
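The three-line snippet above is this record's completion target: inside AgentBase.sample_data, transition dicts from individual skills are appended into combined_transitions and then concatenated along the batch axis. A minimal standalone sketch of that merge, assuming TensorFlow is available and using small dummy tensors and hypothetical skill ids (0, 1, 2) purely for illustration:

import tensorflow as tf

# Hypothetical per-skill transition dicts, as produced by process_data();
# a skill with no sampled data is represented by None.
transitions = {
    0: {'states': tf.ones((4, 10)), 'actions': tf.zeros((4, 4))},
    1: None,
    2: {'states': tf.ones((2, 10)), 'actions': tf.zeros((2, 4))},
}

keys = ['states', 'actions']
combined_transitions = {key: [] for key in keys}

for skill in transitions.keys():
    # Skip skills whose transition data is None (the completed lines)
    if transitions[skill] is not None:
        for key in keys:
            combined_transitions[key].append(transitions[skill][key])

# Concatenate the per-skill batches along the batch axis
for key in keys:
    combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)

print(combined_transitions['states'].shape)   # (6, 10)
print(combined_transitions['actions'].shape)  # (6, 4)

Accumulating into Python lists and calling tf.concat once per key, rather than concatenating inside the loop, is presumably why the original code structures the merge this way.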
[' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different 
from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
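Actor.get_log_prob in the listing above evaluates a fixed-variance diagonal Gaussian by hand: log N(a | mu, sigma) = -0.5 * ((a - mu) / sigma)^2 - 0.5 * log(2 * pi) - log(sigma), summed over action dimensions. A short sketch, assuming tensorflow_probability is installed, that checks this hand-rolled formula against tfp's Normal distribution; the input values below are made up solely for the check:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

mu = tf.constant([[0.1, -0.2, 0.3, 0.0]], dtype=tf.float32)
actions = tf.constant([[0.05, -0.25, 0.35, 0.02]], dtype=tf.float32)
std = tf.ones_like(mu) * 0.05  # the Actor's FIXED_STD

# Hand-rolled diagonal-Gaussian log-density, summed over action dimensions
log_probs = (-0.5 * tf.square((actions - mu) / std)
             - 0.5 * tf.math.log(2 * tf.constant(np.pi, dtype=tf.float32))
             - tf.math.log(std))
log_probs = tf.reduce_sum(log_probs, axis=1)

# Reference: independent Normal per action dimension
ref = tf.reduce_sum(tfp.distributions.Normal(loc=mu, scale=std).log_prob(actions), axis=1)

print(np.allclose(log_probs.numpy(), ref.numpy(), atol=1e-5))  # True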
reason_categories_output: [{'reason_category': 'If Body', 'usage_line': 638}, {'reason_category': 'Loop Body', 'usage_line': 638}, {'reason_category': 'If Condition', 'usage_line': 638}, {'reason_category': 'If Body', 'usage_line': 639}, {'reason_category': 'Loop Body', 'usage_line': 639}, {'reason_category': 'Define Stop Criteria', 'usage_line': 639}, {'reason_category': 'If Body', 'usage_line': 640}, {'reason_category': 'Loop Body', 'usage_line': 640}]
horizon_categories_output: Variable 'skill' used at line 638 is part of a Loop defined at line 636 and has a Short-Range dependency. Variable 'keys' used at line 639 is defined at line 617 and has a Medium-Range dependency. Variable 'combined_transitions' used at line 640 is defined at line 634 and has a Short-Range dependency. Variable 'key' used at line 640 is part of a Loop defined at line 639 and has a Short-Range dependency. Variable 'skill' used at line 640 is part of a Loop defined at line 636 and has a Short-Range dependency.
reason_freq_analysis: {'If Body': 3, 'Loop Body': 3, 'If Condition': 1, 'Define Stop Criteria': 1}
horizon_freq_analysis: {'Variable Loop Short-Range': 3, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
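The two *_freq_analysis fields are frequency counts over the per-usage annotations in reason_categories_output and horizon_categories_output; the horizon counts above correspond to three loop-variable short-range usages, one medium-range usage and one plain short-range usage. The dump does not include the aggregation code, so the snippet below is only a plausible reconstruction of the reason counts, using this record's own data:

from collections import Counter

reason_categories_output = [
    {'reason_category': 'If Body', 'usage_line': 638},
    {'reason_category': 'Loop Body', 'usage_line': 638},
    {'reason_category': 'If Condition', 'usage_line': 638},
    {'reason_category': 'If Body', 'usage_line': 639},
    {'reason_category': 'Loop Body', 'usage_line': 639},
    {'reason_category': 'Define Stop Criteria', 'usage_line': 639},
    {'reason_category': 'If Body', 'usage_line': 640},
    {'reason_category': 'Loop Body', 'usage_line': 640},
]

# Tally how often each reason category appears across the annotated usages
reason_freq_analysis = Counter(d['reason_category'] for d in reason_categories_output)
print(dict(reason_freq_analysis))
# {'If Body': 3, 'Loop Body': 3, 'If Condition': 1, 'Define Stop Criteria': 1}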
task_type: completion_python
code_task: RL_Motion_Planning
start_line: 648
end_line: 650
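start_line and end_line locate the masked completion span in the full source file, and the before/between/after fields that follow are the corresponding slices of the file's lines. The sketch below shows that split under the assumption of 1-based, inclusive line numbers; the helper name split_for_completion and the 700-line dummy file are illustrative only, not part of the dataset:

def split_for_completion(file_lines, start_line, end_line):
    """Split a source file (list of lines) into the before/between/after
    fields of a completion record, assuming 1-based, inclusive line numbers."""
    before = file_lines[:start_line - 1]
    between = file_lines[start_line - 1:end_line]
    after = file_lines[end_line:]
    return before, between, after

# For this record (start_line=648, end_line=650) the between field holds 3 lines,
# matching the three-line snippet stored further down in the record.
lines = ['line {}'.format(i) for i in range(1, 701)]  # hypothetical 700-line file
before, between, after = split_for_completion(lines, 648, 650)
print(len(before), len(between), len(after))  # 647 3 50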
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):']
[' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )']
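The three-line snippet above is this record's completion target: the elif isinstance(transitions, dict) branch of AgentBase.sample_data, where an unsegmented batch is passed straight through process_data with the expert and supervised flags set to True. A minimal sketch of that dispatch, with a stubbed process_data standing in for the full preprocessing (the real method also clips observations, selects HER versus environment goals and casts dtypes):

import tensorflow as tf

def process_data(trans, expert, is_supervised):
    # Stub: the real AgentBase.process_data also clips states/goals,
    # picks the goal source and casts everything to float32
    trans = dict(trans)
    trans['is_demo'] = tf.cast(expert, tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)
    trans['is_sup'] = tf.cast(is_supervised, tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)
    return trans

def dispatch(transitions):
    if all(isinstance(v, dict) for v in transitions.values()):
        # Per-skill case: one sub-dict per skill, merged as in the earlier sketch
        raise NotImplementedError
    elif isinstance(transitions, dict):
        # Unsegmented case (the completed lines): process the flat batch directly
        transitions = process_data(
            transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)
        )
    else:
        raise ValueError("Invalid type of transitions")
    return transitions

batch = {'states': tf.ones((4, 10)), 'successes': tf.zeros((4,))}
print(dispatch(batch)['is_demo'].numpy())  # [1 1 1 1]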
[' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return 
args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Elif Body', 'usage_line': 648}, {'reason_category': 'Elif Body', 'usage_line': 649}, {'reason_category': 'Elif Body', 'usage_line': 650}]
Variable 'self' used at line 648 is defined at line 611 and has a Long-Range dependency. Variable 'transitions' used at line 649 is defined at line 648 and has a Short-Range dependency. Library 'tf' used at line 649 is imported at line 20 and has a Long-Range dependency.
{'Elif Body': 3}
{'Variable Long-Range': 1, 'Variable Short-Range': 1, 'Library Long-Range': 1}
completion_python
RL_Motion_Planning
669
671
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():']
[' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])']
[' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of 
demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Condition', 'usage_line': 669}, {'reason_category': 'Loop Body', 'usage_line': 669}, {'reason_category': 'Loop Body', 'usage_line': 670}, {'reason_category': 'If Body', 'usage_line': 670}, {'reason_category': 'Loop Body', 'usage_line': 671}]
Variable 'key' used at line 669 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 669 is defined at line 667 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 670 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 670 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 671 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 671 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'loss_dict' used at line 671 is defined at line 664 and has a Short-Range dependency.
{'If Condition': 1, 'Loop Body': 3, 'If Body': 1}
{'Variable Loop Short-Range': 3, 'Variable Short-Range': 4}
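The frequency summary on the line above appears to be a direct tally of the per-usage dependency sentences two lines up (three "part of a Loop ... Short-Range" usages of 'key', four plain Short-Range usages), and the reason-frequency field a few lines up ({'If Condition': 1, 'Loop Body': 3, 'If Body': 1}) likewise matches a count over the 'reason_category' entries. Below is a minimal sketch of how such a dependency tally could be reproduced; the function name summarise_dependencies and the exact sentence format it assumes are illustrative guesses, not part of any tooling shipped with this data.

import re
from collections import Counter

def summarise_dependencies(text: str) -> dict:
    """Tally annotation sentences such as
    "Variable 'key' used at line 669 is part of a Loop defined at line 668 and has a Short-Range dependency."
    into labels like 'Variable Loop Short-Range'.
    """
    counts = Counter()
    # Each annotation sentence ends with the word "dependency."
    for sentence in re.split(r'(?<=dependency\.)\s+', text.strip()):
        if not sentence:
            continue
        kind = sentence.split()[0]                  # e.g. 'Variable' or 'Library'
        loop = ' Loop' if 'part of a Loop' in sentence else ''
        rng = re.search(r'\w+-Range', sentence)     # e.g. 'Short-Range', 'Long-Range'
        if rng:
            counts[f'{kind}{loop} {rng.group(0)}'] += 1
    return dict(counts)

Applied to the seven sentences above, this returns {'Variable Loop Short-Range': 3, 'Variable Short-Range': 4}, matching the recorded summary.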
completion_python
RL_Motion_Planning
670
670
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():']
[' avg_loss_dict[key] = []']
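The bracketed line above is this record's ground-truth completion ('between'): inside the loss-averaging loop of AgentBase.train, it starts an empty list the first time a loss key is encountered. Below is a minimal standalone sketch of that accumulate-then-average pattern, using plain Python floats instead of the script's TensorFlow tensors; the key names are taken from BC.train's return dictionary, while the numeric values are invented for illustration.

# Sketch only: gather per-update losses into lists keyed by name, then average each list.
loss_dicts = [
    {'loss/pi': 0.8, 'penalty/pi_ortho_penalty': 0.01},
    {'loss/pi': 0.6, 'penalty/pi_ortho_penalty': 0.02},
]
avg_loss_dict = {}
for loss_dict in loss_dicts:
    for key in loss_dict.keys():
        if key not in avg_loss_dict.keys():
            avg_loss_dict[key] = []                      # the completed line: fresh list for a new key
        avg_loss_dict[key].append(loss_dict[key])
for key in avg_loss_dict.keys():
    avg_loss_dict[key] = sum(avg_loss_dict[key]) / len(avg_loss_dict[key])
print(avg_loss_dict)                                     # {'loss/pi': 0.7, 'penalty/pi_ortho_penalty': 0.015}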
[' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, 
default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 670}, {'reason_category': 'If Body', 'usage_line': 670}]
Variable 'avg_loss_dict' used at line 670 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 670 is part of a Loop defined at line 668 and has a Short-Range dependency.
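These horizon annotations label each variable used in the completed line by how far above the use its definition appears, and by whether that definition is a loop header. The following is a hypothetical toy example of the two labels mentioned here, Variable Short-Range and Variable Loop Short-Range; the exact line-distance thresholds used by the dataset are not stated in this record.

# Illustration only; not part of the dataset's annotation pipeline.
counts = {}                      # definition of 'counts'
items = ['a', 'b', 'a']
for item in items:               # loop header: definition of 'item'
    if item not in counts:
        counts[item] = 0         # 'counts' defined a few lines above -> Variable Short-Range
    counts[item] += 1            # 'item' comes from the loop header  -> Variable Loop Short-Range
print(counts)                    # {'a': 2, 'b': 1}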
{'Loop Body': 1, 'If Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1}
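The two dictionaries above look like simple frequency counts over the per-usage annotations earlier in the record. Under that assumption (this is not necessarily how the dataset was actually built), the reason-side counts can be reproduced with collections.Counter:

from collections import Counter

# Assumed derivation: count the 'reason_category' labels listed in reason_categories_output.
reason_categories = [
    {'reason_category': 'Loop Body', 'usage_line': 670},
    {'reason_category': 'If Body', 'usage_line': 670},
]
reason_freq = Counter(entry['reason_category'] for entry in reason_categories)
print(dict(reason_freq))         # {'Loop Body': 1, 'If Body': 1}

The horizon-side dictionary would follow the same idea, counting one 'Variable Short-Range' and one 'Variable Loop Short-Range' usage.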
completion_python
RL_Motion_Planning
673
673
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():']
[' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])']
[' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, 
help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 673}]
Variable 'avg_loss_dict' used at line 673 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 673 is part of a Loop defined at line 672 and has a Short-Range dependency. Library 'tf' used at line 673 is imported at line 20 and has a Long-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
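For readability, here is a minimal sketch of the code region this row targets, reconstructed from the row's before/between/after fields above. The wrapper function name average_losses and the standalone import are illustrative additions; the body and the source-file line numbers in the comments follow the row data and the dependency analysis (avg_loss_dict is defined at line 667, the loop opens at line 672, line 673 is the target completion, and tf is imported at line 20).

import tensorflow as tf  # 'tf' at line 673 resolves to this import (line 20 of the source file): Library Long-Range

def average_losses(loss_dict):  # hypothetical wrapper around the loss-averaging block of AgentBase.train()
    avg_loss_dict = {}  # line 667: definition the completion depends on (Variable Short-Range)
    for key in loss_dict.keys():
        if key not in avg_loss_dict.keys():
            avg_loss_dict[key] = []
        avg_loss_dict[key].append(loss_dict[key])
    for key in avg_loss_dict.keys():  # line 672: loop providing 'key' (Variable Loop Short-Range)
        avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])  # line 673: the ground-truth completion (Loop Body)
    print("avg_loss_dict: ", avg_loss_dict)
    return avg_loss_dict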
completion_python
RL_Motion_Planning
672
675
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])']
[' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict']
[' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', 
type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Define Stop Criteria', 'usage_line': 672}, {'reason_category': 'Loop Body', 'usage_line': 673}]
Variable 'avg_loss_dict' used at line 672 is defined at line 667 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 673 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 673 is part of a Loop defined at line 672 and has a Short-Range dependency. Library 'tf' used at line 673 is imported at line 20 and has a Long-Range dependency. Variable 'avg_loss_dict' used at line 674 is defined at line 667 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 675 is defined at line 667 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 1}
{'Variable Short-Range': 4, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
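As with the previous row, a minimal sketch of this row's completed span (lines 672-675 per the start_line/end_line fields), annotated with the reason and dependency categories listed above. The wrapper name finish_loss_averaging and the import line are illustrative; the four body lines are the row's ground-truth completion copied from the between field.

import tensorflow as tf  # 'tf' at line 673 resolves to the import at line 20 (Library Long-Range)

def finish_loss_averaging(avg_loss_dict):  # hypothetical wrapper; avg_loss_dict is defined at line 667 in the source file
    for key in avg_loss_dict.keys():  # line 672: Define Stop Criteria
        avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])  # line 673: Loop Body
    print("avg_loss_dict: ", avg_loss_dict)  # line 674
    return avg_loss_dict  # line 675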
completion_python
RL_Motion_Planning
717
717
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():']
[' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()']
[' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. 
of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 717}]
Variable 'avg_loss_dict' used at line 717 is defined at line 715 and has a Short-Range dependency. Variable 'key' used at line 717 is part of a Loop defined at line 716 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1}
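The single-line completion in this record (source line 717) converts each averaged loss tensor to a plain Python number before the wandb logging shown in the "after" context. A minimal sketch of that conversion, using a hypothetical loss value:

import tensorflow as tf

avg_loss_dict = {'loss/pi': tf.reduce_mean([tf.constant(0.40), tf.constant(0.44)])}
for key in avg_loss_dict.keys():
    # .numpy().item() turns a 0-d tf.Tensor into a Python float, which loggers
    # such as wandb expect instead of tensor objects.
    avg_loss_dict[key] = avg_loss_dict[key].numpy().item()
print(avg_loss_dict)  # e.g. {'loss/pi': 0.42...}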
completion_python
RL_Motion_Planning
751
751
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:']
[' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal']
[' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 751}]
Variable 'args' used at line 751 is defined at line 735 and has a Medium-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 1}
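Reading note (hedged): judging from the row above, the reason_freq_analysis and horizon_freq_analysis values appear to be simple per-label counts of the reason_categories_output and horizon_categories_output annotations. The short sketch below is illustrative only and is not part of the dataset; the horizon_labels list is a hand-reduced "<kind> <range>" form of the sentence-style horizon annotation, and the helper names are hypothetical.

# Minimal sketch: reproduce the *_freq_analysis fields as per-label counts.
from collections import Counter

# reason_categories_output for this row (copied from the record above)
reason_annotations = [{'reason_category': 'If Body', 'usage_line': 751}]

# horizon_categories_output reduced by hand to a "<kind> <range>" label
horizon_labels = ['Variable Medium-Range']

reason_freq = dict(Counter(a['reason_category'] for a in reason_annotations))
horizon_freq = dict(Counter(horizon_labels))

print(reason_freq)   # {'If Body': 1} -> matches reason_freq_analysis above
print(horizon_freq)  # {'Variable Medium-Range': 1} -> matches horizon_freq_analysis above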
completion_python
RL_Motion_Planning
753
753
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:']
[' args.ag_dim = 3 # Goal/Object position in the 3D space']
[' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Else Reasoning', 'usage_line': 753}]
Variable 'args' used at line 753 is defined at line 735 and has a Medium-Range dependency.
{'Else Reasoning': 1}
{'Variable Medium-Range': 1}
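A note on the dependency annotations in the rows above and below: the "Short-Range" / "Medium-Range" labels appear to follow from the distance, in source lines, between where a variable is defined and where it is used (here, a 9-line gap is labelled Short-Range and an 18-line gap Medium-Range). The sketch below is a hypothetical reconstruction, not the dataset's actual labelling code; the threshold values (10 and 30 lines) and the function name classify_dependency are assumptions chosen only so that the two examples visible in this dump come out consistently.

# Hypothetical sketch: derive the "Variable ... has a X-Range dependency" sentence
# from a definition line and a usage line. Thresholds are assumed, not documented.
def classify_dependency(name: str, def_line: int, use_line: int,
                        short_max: int = 10, medium_max: int = 30) -> str:
    gap = use_line - def_line
    if gap <= short_max:
        rng = "Short-Range"
    elif gap <= medium_max:
        rng = "Medium-Range"
    else:
        rng = "Long-Range"
    return (f"Variable '{name}' used at line {use_line} is defined at line "
            f"{def_line} and has a {rng} dependency.")

# Reproduces the annotation of the row above under the assumed thresholds:
print(classify_dependency("args", def_line=735, use_line=753))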
completion_python
RL_Motion_Planning
868
868
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)"]
[' args.c_dim = args.num_skills']
[' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 868}]
Variable 'args' used at line 868 is defined at line 859 and has a Short-Range dependency.
{'If Body': 1}
{'Variable Short-Range': 1}
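Similarly, the one-line dictionaries such as {'If Body': 1} and {'Variable Short-Range': 1} look like simple frequency tallies over the same row's category annotations. A minimal sketch of that tally follows, assuming the fields are the plain Python lists and sentences printed here; the helper name count_categories and the sentence-parsing heuristic are illustrative assumptions, not part of the dataset's tooling.

from collections import Counter

def count_categories(reason_records, horizon_sentences):
    # Reason tally: [{'reason_category': 'If Body', 'usage_line': 868}] -> {'If Body': 1}
    reason_freq = dict(Counter(r['reason_category'] for r in reason_records))
    # Horizon tally: build keys like 'Variable Short-Range' from sentences of the form
    # "Variable 'args' used at line 868 ... has a Short-Range dependency."
    keys = []
    for s in horizon_sentences:
        kind = s.split(' ', 1)[0]                                   # e.g. "Variable"
        rng = s.rsplit(' has a ', 1)[1].replace(' dependency.', '')  # e.g. "Short-Range"
        keys.append(f"{kind} {rng}")
    horizon_freq = dict(Counter(keys))
    return reason_freq, horizon_freq

# Reproduces the tallies of the row above:
print(count_categories(
    [{'reason_category': 'If Body', 'usage_line': 868}],
    ["Variable 'args' used at line 868 is defined at line 859 and has a Short-Range dependency."],
))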
completion_python
RL_Motion_Planning
874
874
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':"]
[' args.c_dim = 3']
[" elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 874}]
Variable 'args' used at line 874 is defined at line 859 and has a Medium-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 1}
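The range labels attached to each dependency line above (Short-Range, Medium-Range, Long-Range) are a function of the line distance between a variable's use and its definition. The exact cutoffs are not stated in this section, so the sketch below uses hypothetical thresholds (gap of at most 10 lines is short, at most 30 is medium, anything larger is long) chosen only because they agree with the labelled examples nearby; treat the function name and the thresholds as assumptions, not the dataset's definition.

# Minimal sketch: classify a use/definition pair by line distance.
# The thresholds are assumptions consistent with the labelled examples in
# this section (gap 10 -> Short, 15 -> Medium, 34 -> Long); they are not
# taken from any documented specification.
def classify_range(usage_line: int, definition_line: int) -> str:
    gap = abs(usage_line - definition_line)
    if gap <= 10:
        return "Short-Range"
    if gap <= 30:
        return "Medium-Range"
    return "Long-Range"

# Example from the record above: 'args' used at line 874, defined at line 859.
print(classify_range(874, 859))  # -> "Medium-Range"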
completion_python
Timeseries_Clustering
23
23
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)']
['beta=pca.fit_transform(norm_ret_df.T)']
['df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'pca' used at line 23 is defined at line 22 and has a Short-Range dependency. Variable 'norm_ret_df' used at line 23 is defined at line 20 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
completion_python
Timeseries_Clustering
34
34
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)']
['dbscan.fit_predict(X)']
['labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'dbscan' used at line 34 is defined at line 33 and has a Short-Range dependency. Variable 'X' used at line 34 is defined at line 29 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
completion_python
Timeseries_Clustering
51
52
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:']
[' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]"]
['', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. 
A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 51}, {'reason_category': 'Loop Body', 'usage_line': 52}]
Variable 'd' used at line 51 is defined at line 49 and has a Short-Range dependency. Variable 'k' used at line 51 is part of a Loop defined at line 50 and has a Short-Range dependency. Library 'pd' used at line 51 is imported at line 1 and has a Long-Range dependency. Variable 'd' used at line 52 is defined at line 49 and has a Short-Range dependency. Variable 'k' used at line 52 is part of a Loop defined at line 50 and has a Short-Range dependency. Variable 'df_beta' used at line 52 is defined at line 42 and has a Short-Range dependency.
{'Loop Body': 2}
{'Variable Short-Range': 3, 'Variable Loop Short-Range': 2, 'Library Long-Range': 1}
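Each record closes with two frequency dictionaries that simply tally the category labels listed in the two fields just before them. A minimal sketch of that aggregation is shown below, assuming the per-dependency labels are available as plain lists of strings; the variable names are hypothetical placeholders, not the dataset's field names.

from collections import Counter

# Hypothetical inputs mirroring the record above: the reason categories and
# the dependency categories extracted from the two preceding fields.
reason_categories = ["Loop Body", "Loop Body"]
dependency_categories = [
    "Variable Short-Range", "Variable Loop Short-Range", "Library Long-Range",
    "Variable Short-Range", "Variable Loop Short-Range", "Variable Short-Range",
]

reason_freq = dict(Counter(reason_categories))
horizon_freq = dict(Counter(dependency_categories))
print(reason_freq)   # {'Loop Body': 2}
print(horizon_freq)  # {'Variable Short-Range': 3, 'Variable Loop Short-Range': 2, 'Library Long-Range': 1}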
completion_python
Timeseries_Clustering
63
63
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)']
['kmeans.fit(X)']
['label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' 
selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'kmeans' used at line 63 is defined at line 62 and has a Short-Range dependency. Variable 'X' used at line 63 is defined at line 29 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Long-Range': 1}
completion_python
Timeseries_Clustering
73
73
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')"]
['labels_hc = hc.fit_predict(X)']
['print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs 
are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[]
Variable 'hc' used at line 73 is defined at line 72 and has a Short-Range dependency. Variable 'X' used at line 73 is defined at line 29 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Long-Range': 1}
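A minimal, self-contained sketch of the step exercised by the completion above: fitting a 5-cluster Ward-linkage AgglomerativeClustering model and taking its labels via fit_predict. The data and names (X_demo, hc_demo) are hypothetical; the record passes affinity='euclidean', an argument newer scikit-learn releases expose as metric (Ward linkage implies Euclidean distances either way).

import numpy as np
from sklearn.cluster import AgglomerativeClustering

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(20, 10))                               # stand-in for the standardised PCA features
hc_demo = AgglomerativeClustering(n_clusters=5, linkage='ward')  # Ward linkage uses Euclidean distances
labels_demo = hc_demo.fit_predict(X_demo)                        # one integer cluster label per sample
print(labels_demo)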
completion_python
Timeseries_Clustering
93
97
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):']
[' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)']
['', '# asset1_list,asset2_list', '', 'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 93}, {'reason_category': 'Loop Body', 'usage_line': 94}, {'reason_category': 'Loop Body', 'usage_line': 95}, {'reason_category': 'Loop Body', 'usage_line': 96}, {'reason_category': 'Loop Body', 'usage_line': 97}]
Variable 'asset1_list' used at line 93 is defined at line 89 and has a Short-Range dependency. Variable 'pairs' used at line 93 is defined at line 88 and has a Short-Range dependency. Variable 'i' used at line 93 is part of a Loop defined at line 92 and has a Short-Range dependency. Variable 'asset2_list' used at line 94 is defined at line 90 and has a Short-Range dependency. Variable 'pairs' used at line 94 is defined at line 88 and has a Short-Range dependency. Variable 'i' used at line 94 is part of a Loop defined at line 92 and has a Short-Range dependency. Library 'LA' used at line 96 is imported at line 15 and has a Long-Range dependency. Variable 'cumulative_norm_ret' used at line 96 is defined at line 84 and has a Medium-Range dependency. Variable 'asset1_list' used at line 96 is defined at line 89 and has a Short-Range dependency. Variable 'i' used at line 96 is part of a Loop defined at line 92 and has a Short-Range dependency. Variable 'asset2_list' used at line 96 is defined at line 90 and has a Short-Range dependency. Variable 'euclidean_distance_list' used at line 97 is defined at line 91 and has a Short-Range dependency. Variable 'dist' used at line 97 is defined at line 96 and has a Short-Range dependency.
{'Loop Body': 5}
{'Variable Short-Range': 8, 'Variable Loop Short-Range': 3, 'Library Long-Range': 1, 'Variable Medium-Range': 1}
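A minimal sketch (synthetic returns, hypothetical names) of the distance computed in the completion above: the Euclidean (L2) norm of the difference between two cumulative normalised return series, via numpy.linalg.norm. Since squaring is monotone on non-negative values, ranking pairs by this norm or by the squared distance mentioned in the record's comments gives the same ordering.

import numpy as np
from numpy import linalg as LA

rng = np.random.default_rng(1)
ret_a = rng.normal(scale=0.01, size=250)   # synthetic daily normalised log returns, asset A
ret_b = rng.normal(scale=0.01, size=250)   # synthetic daily normalised log returns, asset B
cum_a, cum_b = np.cumsum(ret_a), np.cumsum(ret_b)
dist = LA.norm(cum_a - cum_b)              # square root of the sum of squared differences
print(dist)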
completion_python
Timeseries_Clustering
111
112
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):']
[' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])']
['', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 111}, {'reason_category': 'Loop Body', 'usage_line': 112}]
Variable 'sdd1' used at line 111 is defined at line 108 and has a Short-Range dependency. Variable 'sdd_list' used at line 111 is defined at line 101 and has a Short-Range dependency. Variable 'i' used at line 111 is part of a Loop defined at line 110 and has a Short-Range dependency. Variable 'sdd2' used at line 112 is defined at line 109 and has a Short-Range dependency. Variable 'sdd_list' used at line 112 is defined at line 101 and has a Medium-Range dependency. Variable 'i' used at line 112 is part of a Loop defined at line 110 and has a Short-Range dependency.
{'Loop Body': 2}
{'Variable Short-Range': 3, 'Variable Loop Short-Range': 2, 'Variable Medium-Range': 1}
completion_python
Timeseries_Clustering
120
130
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):']
[' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break']
['', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 120}, {'reason_category': 'Loop Body', 'usage_line': 121}, {'reason_category': 'Loop Body', 'usage_line': 122}, {'reason_category': 'Loop Body', 'usage_line': 123}, {'reason_category': 'If Condition', 'usage_line': 123}, {'reason_category': 'Loop Body', 'usage_line': 124}, {'reason_category': 'If Body', 'usage_line': 124}, {'reason_category': 'Loop Body', 'usage_line': 125}, {'reason_category': 'If Body', 'usage_line': 125}, {'reason_category': 'Loop Body', 'usage_line': 126}, {'reason_category': 'If Body', 'usage_line': 126}, {'reason_category': 'Loop Body', 'usage_line': 127}, {'reason_category': 'If Body', 'usage_line': 127}, {'reason_category': 'Loop Body', 'usage_line': 128}, {'reason_category': 'Loop Body', 'usage_line': 129}, {'reason_category': 'If Condition', 'usage_line': 129}, {'reason_category': 'Loop Body', 'usage_line': 130}, {'reason_category': 'If Body', 'usage_line': 130}]
Variable 'sdd1' used at line 120 is defined at line 108 and has a Medium-Range dependency. Variable 'i' used at line 120 is part of a Loop defined at line 119 and has a Short-Range dependency. Variable 'sdd2' used at line 121 is defined at line 109 and has a Medium-Range dependency. Variable 'i' used at line 121 is part of a Loop defined at line 119 and has a Short-Range dependency. Variable 's1' used at line 123 is defined at line 120 and has a Short-Range dependency. Variable 'selected_stocks' used at line 123 is defined at line 114 and has a Short-Range dependency. Variable 's2' used at line 123 is defined at line 121 and has a Short-Range dependency. Variable 'selected_stocks' used at line 124 is defined at line 114 and has a Short-Range dependency. Variable 's1' used at line 124 is defined at line 120 and has a Short-Range dependency. Variable 'selected_stocks' used at line 125 is defined at line 114 and has a Medium-Range dependency. Variable 's2' used at line 125 is defined at line 121 and has a Short-Range dependency. Variable 's1' used at line 126 is defined at line 120 and has a Short-Range dependency. Variable 's2' used at line 126 is defined at line 121 and has a Short-Range dependency. Variable 'selected_pairs_messd' used at line 127 is defined at line 115 and has a Medium-Range dependency. Variable 'pair' used at line 127 is defined at line 126 and has a Short-Range dependency. Variable 'selected_pairs_messd' used at line 129 is defined at line 115 and has a Medium-Range dependency. Library 'math' used at line 129 is imported at line 13 and has a Long-Range dependency. Variable 'cluster1_asset_list' used at line 129 is defined at line 79 and has a Long-Range dependency.
{'Loop Body': 11, 'If Condition': 2, 'If Body': 5}
{'Variable Medium-Range': 5, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 9, 'Library Long-Range': 1, 'Variable Long-Range': 1}
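A minimal sketch of the greedy selection exercised by the completion above: walk the candidate pairs in ascending distance order and keep a pair only if neither asset has already been used, so each asset appears in at most one selected pair. The symbols and distances are the hypothetical ones from the record's own worked example; the record's loop additionally breaks once the number of selected pairs reaches math.comb(n, 2), the total number of candidate pairs.

# candidate pairs already sorted by ascending distance, as in sdd_list
pairs_sorted = [(('A', 'B'), 2.0), (('A', 'C'), 3.0), (('C', 'D'), 4.0), (('B', 'D'), 5.0)]

selected_stocks, selected_pairs = [], []
for (s1, s2), _dist in pairs_sorted:
    if s1 not in selected_stocks and s2 not in selected_stocks:
        selected_stocks.extend([s1, s2])
        selected_pairs.append((s1, s2))

print(selected_pairs)   # [('A', 'B'), ('C', 'D')], matching the example in the record's comments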
completion_python
Timeseries_Clustering
124
127
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):']
[' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)']
['', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 124}, {'reason_category': 'If Body', 'usage_line': 124}, {'reason_category': 'Loop Body', 'usage_line': 125}, {'reason_category': 'If Body', 'usage_line': 125}, {'reason_category': 'Loop Body', 'usage_line': 126}, {'reason_category': 'If Body', 'usage_line': 126}, {'reason_category': 'Loop Body', 'usage_line': 127}, {'reason_category': 'If Body', 'usage_line': 127}]
Variable 'selected_stocks' used at line 124 is defined at line 114 and has a Short-Range dependency. Variable 's1' used at line 124 is defined at line 120 and has a Short-Range dependency. Variable 'selected_stocks' used at line 125 is defined at line 114 and has a Medium-Range dependency. Variable 's2' used at line 125 is defined at line 121 and has a Short-Range dependency. Variable 's1' used at line 126 is defined at line 120 and has a Short-Range dependency. Variable 's2' used at line 126 is defined at line 121 and has a Short-Range dependency. Variable 'selected_pairs_messd' used at line 127 is defined at line 115 and has a Medium-Range dependency. Variable 'pair' used at line 127 is defined at line 126 and has a Short-Range dependency.
{'Loop Body': 4, 'If Body': 4}
{'Variable Short-Range': 6, 'Variable Medium-Range': 2}
completion_python
Timeseries_Clustering
143
144
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):']
[' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)']
['', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 143}, {'reason_category': 'Loop Body', 'usage_line': 144}]
Library 'pearsonr' used at line 143 is imported at line 16 and has a Long-Range dependency. Variable 'clusters_norm_ret_df' used at line 143 is defined at line 80 and has a Long-Range dependency. Variable 'pairs' used at line 143 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 143 is part of a Loop defined at line 142 and has a Short-Range dependency. Variable 'pearson_corr_list' used at line 144 is defined at line 140 and has a Short-Range dependency. Variable 'corr' used at line 144 is defined at line 143 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Long-Range': 2, 'Variable Loop Short-Range': 1, 'Variable Short-Range': 2}
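A minimal sketch (synthetic data, hypothetical names) of the correlation step exercised by the completion above: scipy.stats.pearsonr returns a (statistic, p-value) pair, and only the statistic is kept for ranking the candidate pairs. The record then sorts ascending on that statistic; a descending sort (reverse=True) would put the most positively correlated pairs first.

import itertools
import numpy as np
import pandas as pd
from scipy.stats import pearsonr

rng = np.random.default_rng(2)
demo_returns = pd.DataFrame(rng.normal(size=(250, 4)), columns=['A', 'B', 'C', 'D'])

candidate_pairs = list(itertools.combinations(demo_returns.columns, 2))
corr_by_pair = [(p, pearsonr(demo_returns[p[0]], demo_returns[p[1]])[0]) for p in candidate_pairs]
corr_by_pair.sort(key=lambda x: x[1])      # ascending, mirroring the record
print(corr_by_pair)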
completion_python
Timeseries_Clustering
153
154
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):']
[' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])']
['', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 153}, {'reason_category': 'Loop Body', 'usage_line': 154}]
Variable 'sdd1' used at line 153 is defined at line 150 and has a Short-Range dependency. Variable 'sort_corr_list' used at line 153 is defined at line 147 and has a Short-Range dependency. Variable 'i' used at line 153 is part of a Loop defined at line 152 and has a Short-Range dependency. Variable 'sdd2' used at line 154 is defined at line 151 and has a Short-Range dependency. Variable 'sort_corr_list' used at line 154 is defined at line 147 and has a Short-Range dependency. Variable 'i' used at line 154 is part of a Loop defined at line 152 and has a Short-Range dependency.
{'Loop Body': 2}
{'Variable Short-Range': 4, 'Variable Loop Short-Range': 2}
completion_python
Timeseries_Clustering
166
169
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):']
[' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)']
['', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])', 'print(coint_pairs)']
[{'reason_category': 'Loop Body', 'usage_line': 166}, {'reason_category': 'If Body', 'usage_line': 166}, {'reason_category': 'Loop Body', 'usage_line': 167}, {'reason_category': 'If Body', 'usage_line': 167}, {'reason_category': 'Loop Body', 'usage_line': 168}, {'reason_category': 'If Body', 'usage_line': 168}, {'reason_category': 'Loop Body', 'usage_line': 169}, {'reason_category': 'If Body', 'usage_line': 169}]
Variable 'selected_stocks' used at line 166 is defined at line 156 and has a Short-Range dependency. Variable 's1' used at line 166 is defined at line 162 and has a Short-Range dependency. Variable 'selected_stocks' used at line 167 is defined at line 156 and has a Medium-Range dependency. Variable 's2' used at line 167 is defined at line 163 and has a Short-Range dependency. Variable 's1' used at line 168 is defined at line 162 and has a Short-Range dependency. Variable 's2' used at line 168 is defined at line 163 and has a Short-Range dependency. Variable 'selected_pairs_corr' used at line 169 is defined at line 157 and has a Medium-Range dependency. Variable 'pair' used at line 169 is defined at line 168 and has a Short-Range dependency.
{'Loop Body': 4, 'If Body': 4}
{'Variable Short-Range': 6, 'Variable Medium-Range': 2}
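The section's final record exercises the cointegration filter: statsmodels' coint() applied to cumulative sums of the cluster-1 return series, keeping pairs whose p-value falls below 0.05. A minimal sketch on synthetic series that share a stochastic trend by construction (all names here are hypothetical):

import numpy as np
from statsmodels.tsa.stattools import coint

rng = np.random.default_rng(3)
common = np.cumsum(rng.normal(size=500))             # shared random-walk component
series_a = common + rng.normal(scale=0.5, size=500)  # stationary noise around the common trend
series_b = common + rng.normal(scale=0.5, size=500)  # ditto, so series_a and series_b are cointegrated by construction

score, pvalue, _ = coint(series_a, series_b)         # Engle-Granger style test: statistic, p-value, critical values
if pvalue < 0.05:
    print('cointegrated at the 5% level, p =', round(pvalue, 4))
else:
    print('no cointegration detected, p =', round(pvalue, 4))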
completion_python
Timeseries_Clustering
183
186
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):']
[' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:', ' coint_pairs.append(pairs[i])']
['print(coint_pairs)']
reason_categories_output: [{'reason_category': 'Loop Body', 'usage_line': 183}, {'reason_category': 'Loop Body', 'usage_line': 184}, {'reason_category': 'Loop Body', 'usage_line': 185}, {'reason_category': 'If Condition', 'usage_line': 185}, {'reason_category': 'Loop Body', 'usage_line': 186}, {'reason_category': 'If Body', 'usage_line': 186}]
horizon_categories_output: Library 'coint' used at line 183 is imported at line 17 and has a Long-Range dependency. Library 'np' used at line 183 is imported at line 2 and has a Long-Range dependency. Variable 'clusters_norm_ret_df' used at line 183 is defined at line 80 and has a Long-Range dependency. Variable 'pairs' used at line 183 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 183 is part of a Loop defined at line 182 and has a Short-Range dependency. Variable 'pvalue' used at line 185 is defined at line 183 and has a Short-Range dependency. Variable 'confidence_level' used at line 185 is defined at line 184 and has a Short-Range dependency. Variable 'coint_pairs' used at line 186 is defined at line 180 and has a Short-Range dependency. Variable 'pairs' used at line 186 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 186 is part of a Loop defined at line 182 and has a Short-Range dependency.
reason_freq_analysis: {'Loop Body': 4, 'If Condition': 1, 'If Body': 1}
horizon_freq_analysis: {'Library Long-Range': 2, 'Variable Long-Range': 3, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
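For reference, the target span of this record (source lines 183-186) applies the Engle-Granger cointegration test from statsmodels.tsa.stattools.coint to the cumulative normalised returns of each candidate pair and keeps the pairs whose p-value falls below the 0.05 level. The snippet below is a minimal, self-contained sketch of that same filter; the series names and the synthetic data are illustrative only and are not taken from the dataset.

import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import coint

# Illustrative data: two return series driven by a shared random walk (so their
# cumulative sums are cointegrated) plus one unrelated series.
rng = np.random.default_rng(107)
common = rng.normal(size=500).cumsum()
returns = pd.DataFrame({
    "A": np.diff(common + rng.normal(scale=0.5, size=500), prepend=0.0),
    "B": np.diff(common + rng.normal(scale=0.5, size=500), prepend=0.0),
    "C": rng.normal(size=500),
})

# Same filter as the record's target completion: run the cointegration test on
# the cumulative returns and keep pairs whose p-value is below the 5% level.
candidate_pairs = [("A", "B"), ("A", "C"), ("B", "C")]
coint_pairs = []
for s1, s2 in candidate_pairs:
    score, pvalue, _ = coint(np.cumsum(returns[s1]), np.cumsum(returns[s2]))
    if pvalue < 0.05:
        coint_pairs.append((s1, s2))

print(coint_pairs)  # expected to contain ('A', 'B') for this construction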
task_type: completion_python
code_task: Timeseries_Clustering
start_line: 186
end_line: 186
['import pandas as pd', 'import numpy as np', 'import scipy.stats as stats', 'from sklearn.decomposition import PCA', 'from sklearn import metrics', 'from sklearn.cluster import DBSCAN', 'from sklearn.manifold import TSNE', 'from sklearn import preprocessing', 'from sklearn.cluster import KMeans', 'from sklearn.cluster import AgglomerativeClustering', 'import scipy.cluster.hierarchy as shc', 'import itertools', 'import math', 'import random', 'from numpy import linalg as LA', 'from scipy.stats import pearsonr', 'from statsmodels.tsa.stattools import coint', '', '#load dataframe', "norm_ret_df = pd.read_csv('./norm_ret_df.csv', index_col = False)", '#perform PCA with 10 components, whiten=True -> will return 10 principle components for each asset', 'pca = PCA(n_components=10, whiten=True)', 'beta=pca.fit_transform(norm_ret_df.T)', 'df_beta=pd.DataFrame(beta)', 'stock_pca = df_beta.values', 'print(stock_pca.shape)', '', '#standardise principal component array with prepcoessing.StandardScaler() and apply this with fit_transform to the pca_stock array ', 'X = preprocessing.StandardScaler().fit_transform(stock_pca)', '', '######### DBSCAN #########', '# #perform DBSCAN clustering algorithm on preprocessed data eps=2, min_samples =3', 'dbscan = DBSCAN(eps=2, min_samples=3)', 'dbscan.fit_predict(X)', 'labels = dbscan.labels_', 'n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)', 'print(labels)', '', '#attach cluster labels to the last column norm_ret_df', "df_beta['labels']=labels", 'df_beta[\'labels\'] = df_beta[\'labels\'].astype("category")', 'df_beta=df_beta.set_index(norm_ret_df.T.index)', "df_beta.sort_values(by=['labels'])", 'print(df_beta)', '', '#sort the rows with the same label into each of their own dataframes', 'k_list=np.arange(0,n_clusters_,1)', 'k_list', 'd = {}', 'for k in k_list:', ' d[k] = pd.DataFrame()', " d[k] = df_beta[df_beta['labels'] ==k]", '', 'print(d[0])', 'print(d[1])', 'print(d[2])', 'print(d[3])', 'print(d[4])', '', '#######KMEANS#######', '#run kmeans on pre processed data (X) set clusters to 5, n_init = 10, and random_state=42 so alog is iniitialised the same every time', 'kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)', 'kmeans.fit(X)', 'label_kmeans=kmeans.predict(X) ', 'center_kmeans=kmeans.cluster_centers_', 'print(label_kmeans)', 'print(center_kmeans)', '', '####Hierarchical Clustering#####', '# run agglomerative hierarchical clustering on preprocessed data X', 'clusters = 5', "hc = AgglomerativeClustering(n_clusters= clusters, affinity='euclidean', linkage='ward')", 'labels_hc = hc.fit_predict(X)', 'print(labels_hc)', '', '###optimal pairs via statistical analysis with DBSCAN clusters', '', '#fetch normalised log returns for assets belonging to DBSCAN cluster 1 (label = 1)', 'cluster1_asset_list = d[1].index.values', 'clusters_norm_ret_df = norm_ret_df[cluster1_asset_list]', 'print(clusters_norm_ret_df)', '', '#cumulative returns', 'cumulative_norm_ret=clusters_norm_ret_df.cumsum()', '', '#optimal pairs via minimum sum of Euclidean squared distances btw cumulative log normalised returns', 'pair_order_list = itertools.combinations(cluster1_asset_list,2)', 'pairs=list(pair_order_list)', 'asset1_list=[]', 'asset2_list=[]', 'euclidean_distance_list=[]', 'for i in range(0,len(pairs)):', ' asset1_list.append(pairs[i][0])', ' asset2_list.append(pairs[i][1])', '', ' dist = LA.norm(cumulative_norm_ret[asset1_list[i]]-cumulative_norm_ret[asset2_list[i]])', ' euclidean_distance_list.append(dist)', '', '# asset1_list,asset2_list', '', 
'sdd_list=list(zip(pairs,euclidean_distance_list))', 'sdd_list.sort(key = lambda x: x[1])', '', '#sort every pairwise combination based off of the euclidean squared distances. A unique optimal pair will occur with the minimum.', '# example: if pair A and B have a distance of 2 and A and C have a distance of 3 and C and D have a distance of 4, the optimal pairing would be (A,B) and (C,D)', '# write the pairs in the tuple form (A,B) returns a list of these unique optimal pairs', '# Each asset in a pair should not have previously been paired with another (no repeating assets per pairs)', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sdd_list)):', ' sdd1.append(sdd_list[i][0][0])', ' sdd2.append(sdd_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_messd = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sdd_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_messd.append(pair)', '', ' if len(selected_pairs_messd) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_messd)', '', '####### optimal pairs through correlation strategy use the normalised log returns of DBSCAN cluster1 assets', '', '#calculate pearson correlation for every possible pairing of assets', 'pearson_corr_list=[]', '', 'for i in range(0,len(pairs)):', ' corr= pearsonr(clusters_norm_ret_df[pairs[i][0]],clusters_norm_ret_df[pairs[i][1]])[0]', ' pearson_corr_list.append(corr)', '', '#sort pairs by pearson correlation', 'sort_corr_list=list(zip(pairs,pearson_corr_list))', 'sort_corr_list.sort(key = lambda x: x[1])', '', 'sdd1=[]', 'sdd2=[]', 'for i in range(0,len(sort_corr_list)):', ' sdd1.append(sort_corr_list[i][0][0])', ' sdd2.append(sort_corr_list[i][0][1])', '', 'selected_stocks = []', 'selected_pairs_corr = []', 'opt_asset1=[]', 'opt_asset2=[]', '', 'for i in range(0,len(sort_corr_list)):', ' s1=sdd1[i]', ' s2=sdd2[i]', '', ' if (s1 not in selected_stocks) and (s2 not in selected_stocks):', ' selected_stocks.append(s1)', ' selected_stocks.append(s2)', ' pair=(s1,s2)', ' selected_pairs_corr.append(pair)', '', ' if len(selected_pairs_corr) == math.comb(len(cluster1_asset_list),2):', ' break', '', 'opt_asset1=selected_stocks[0:len(selected_stocks)-1:2]', 'opt_asset2=selected_stocks[1:len(selected_stocks):2]', '', 'print(selected_pairs_corr)', '', '###### check which asset pairs are cointegrated from DSCAN cluster 1', 'coint_pairs=[]', 'np.random.seed(107)', 'for i in range(0,len(pairs)):', ' score, pvalue, _ = coint(np.cumsum(clusters_norm_ret_df[pairs[i][0]]),np.cumsum(clusters_norm_ret_df[pairs[i][1]]))', ' confidence_level = 0.05', ' if pvalue < confidence_level:']
[' coint_pairs.append(pairs[i])']
['print(coint_pairs)']
reason_categories_output: [{'reason_category': 'Loop Body', 'usage_line': 186}, {'reason_category': 'If Body', 'usage_line': 186}]
horizon_categories_output: Variable 'coint_pairs' used at line 186 is defined at line 180 and has a Short-Range dependency. Variable 'pairs' used at line 186 is defined at line 88 and has a Long-Range dependency. Variable 'i' used at line 186 is part of a Loop defined at line 182 and has a Short-Range dependency.
reason_freq_analysis: {'Loop Body': 1, 'If Body': 1}
horizon_freq_analysis: {'Variable Short-Range': 1, 'Variable Long-Range': 1, 'Variable Loop Short-Range': 1}
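Both records share the same context code, which also includes the greedy unique-pair selection used for the distance-based and correlation-based strategies: candidate pairs are sorted by their score, and a pair is kept only if neither asset already appears in a previously selected pair. The sketch below isolates that selection step; the pairs and scores are made up to match the worked example in the context comments (distances A-B: 2, A-C: 3, C-D: 4).

# Greedy unique-pair selection, as used for both the Euclidean-distance and
# the Pearson-correlation rankings in the records' context code.
scored_pairs = [(("A", "B"), 2.0), (("A", "C"), 3.0), (("C", "D"), 4.0)]
scored_pairs.sort(key=lambda x: x[1])  # smallest score (closest pair) first

selected_stocks = []
selected_pairs = []
for (s1, s2), _score in scored_pairs:
    # Keep a pair only if neither asset has been assigned to an earlier pair.
    if s1 not in selected_stocks and s2 not in selected_stocks:
        selected_stocks.extend([s1, s2])
        selected_pairs.append((s1, s2))

print(selected_pairs)  # [('A', 'B'), ('C', 'D')]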