diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/appo_torch_policy.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/appo_torch_policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f3933a3c587b5f46966f6edcf0c75ad00a64b0f Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/appo_torch_policy.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/utils.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c15fb10d0135ace27275612bb1da34f2e9b0fb35 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/utils.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/torch/appo_torch_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/torch/appo_torch_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..67d585424343c1de3627675549c6b2c229febc7a --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/torch/appo_torch_learner.py @@ -0,0 +1,260 @@ +"""Asynchronous Proximal Policy Optimization (APPO) + +The algorithm is described in [1] (under the name of "IMPACT"): + +Detailed documentation: +https://docs.ray.io/en/master/rllib-algorithms.html#appo + +[1] IMPACT: Importance Weighted Asynchronous Architectures with Clipped Target Networks. +Luo et al. 2020 +https://arxiv.org/pdf/1912.00167 +""" +from typing import Dict + +from ray.rllib.algorithms.appo.appo import ( + APPOConfig, + LEARNER_RESULTS_CURR_KL_COEFF_KEY, + LEARNER_RESULTS_KL_KEY, + TARGET_ACTION_DIST_LOGITS_KEY, +) +from ray.rllib.algorithms.appo.appo_learner import APPOLearner +from ray.rllib.algorithms.impala.torch.impala_torch_learner import IMPALATorchLearner +from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( + make_time_major, + vtrace_torch, +) +from ray.rllib.core.columns import Columns +from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY +from ray.rllib.core.rl_module.apis import TargetNetworkAPI, ValueFunctionAPI +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.numpy import convert_to_numpy +from ray.rllib.utils.typing import ModuleID, TensorType + +torch, nn = try_import_torch() + + +class APPOTorchLearner(APPOLearner, IMPALATorchLearner): + """Implements APPO loss / update logic on top of IMPALATorchLearner.""" + + @override(IMPALATorchLearner) + def compute_loss_for_module( + self, + *, + module_id: ModuleID, + config: APPOConfig, + batch: Dict, + fwd_out: Dict[str, TensorType], + ) -> TensorType: + module = self.module[module_id].unwrapped() + assert isinstance(module, TargetNetworkAPI) + assert isinstance(module, ValueFunctionAPI) + + # TODO (sven): Now that we do the +1ts trick to be less vulnerable about + # bootstrap values at the end of rollouts in the new stack, we might make + # this a more flexible, configurable parameter for users, e.g. + # `v_trace_seq_len` (independent of `rollout_fragment_length`). Separation + # of concerns (sampling vs learning). 
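+ # The loss below operates on time-major tensors: `make_time_major()` folds the
+ # flat [B*T, ...] train batch into [T, B, ...] so v-trace can scan along the
+ # time axis. `rollout_frag_or_episode_len` (or `seq_lens` for stateful,
+ # recurrent modules) defines the time dimension T used for that reshape.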
+ rollout_frag_or_episode_len = config.get_rollout_fragment_length() + recurrent_seq_len = batch.get("seq_lens") + + loss_mask = batch[Columns.LOSS_MASK].float() + loss_mask_time_major = make_time_major( + loss_mask, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ) + size_loss_mask = torch.sum(loss_mask) + + values = module.compute_values( + batch, embeddings=fwd_out.get(Columns.EMBEDDINGS) + ) + + action_dist_cls_train = module.get_train_action_dist_cls() + + # Policy being trained (current). + current_action_dist = action_dist_cls_train.from_logits( + fwd_out[Columns.ACTION_DIST_INPUTS] + ) + current_actions_logp = current_action_dist.logp(batch[Columns.ACTIONS]) + current_actions_logp_time_major = make_time_major( + current_actions_logp, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ) + + # Target policy. + target_action_dist = action_dist_cls_train.from_logits( + module.forward_target(batch)[TARGET_ACTION_DIST_LOGITS_KEY] + ) + target_actions_logp = target_action_dist.logp(batch[Columns.ACTIONS]) + target_actions_logp_time_major = make_time_major( + target_actions_logp, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ) + + # EnvRunner's policy (behavior). + behavior_actions_logp = batch[Columns.ACTION_LOGP] + behavior_actions_logp_time_major = make_time_major( + behavior_actions_logp, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ) + + rewards_time_major = make_time_major( + batch[Columns.REWARDS], + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ) + + assert Columns.VALUES_BOOTSTRAPPED not in batch + values_time_major = make_time_major( + values, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ) + # Use as bootstrap values the vf-preds in the next "batch row", except + # for the very last row (which doesn't have a next row), for which the + # bootstrap value does not matter b/c it has a +1ts value at its end + # anyways. So we chose an arbitrary item (for simplicity of not having to + # move new data to the device). + bootstrap_values = torch.cat( + [ + values_time_major[0][1:], # 0th ts values from "next row" + values_time_major[0][0:1], # <- can use any arbitrary value here + ], + dim=0, + ) + + # The discount factor that is used should be `gamma * lambda_`, except for + # termination timesteps, in which case the discount factor should be 0. + discounts_time_major = ( + ( + 1.0 + - make_time_major( + batch[Columns.TERMINATEDS], + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ).float() + # See [1] 3.1: Discounts must contain the GAE lambda_ parameter as well. + ) + * config.gamma + * config.lambda_ + ) + + # Note that vtrace will compute the main loop on the CPU for better performance. + vtrace_adjusted_target_values, pg_advantages = vtrace_torch( + # See [1] 3.1: For AˆV-GAE, the ratios used are: min(c¯, π(target)/π(i)) + # π(target) + target_action_log_probs=target_actions_logp_time_major, + # π(i) + behaviour_action_log_probs=behavior_actions_logp_time_major, + # See [1] 3.1: Discounts must contain the GAE lambda_ parameter as well. 
+ discounts=discounts_time_major, + rewards=rewards_time_major, + values=values_time_major, + bootstrap_values=bootstrap_values, + # c¯ + clip_rho_threshold=config.vtrace_clip_rho_threshold, + # c¯ (but we allow users to distinguish between c¯ used for + # value estimates and c¯ used for the advantages. + clip_pg_rho_threshold=config.vtrace_clip_pg_rho_threshold, + ) + pg_advantages = pg_advantages * loss_mask_time_major + + # The policy gradient loss. + # As described in [1], use a logp-ratio of: + # min(π(i) / π(target), ρ) * (π / π(i)), where .. + # - π are the action probs from the current (learner) policy + # - π(i) are the action probs from the ith EnvRunner + # - π(target) are the action probs from the target network + # - ρ is the "target-worker clipping" (2.0 in the paper) + target_worker_is_ratio = torch.clip( + torch.exp( + behavior_actions_logp_time_major - target_actions_logp_time_major + ), + 0.0, + config.target_worker_clipping, + ) + target_worker_logp_ratio = target_worker_is_ratio * torch.exp( + current_actions_logp_time_major - behavior_actions_logp_time_major + ) + surrogate_loss = torch.minimum( + pg_advantages * target_worker_logp_ratio, + pg_advantages + * torch.clip( + target_worker_logp_ratio, + 1 - config.clip_param, + 1 + config.clip_param, + ), + ) + mean_pi_loss = -(torch.sum(surrogate_loss) / size_loss_mask) + + # Compute KL-loss (if required): KL divergence between current action dist. + # and target action dict. + if config.use_kl_loss: + action_kl = target_action_dist.kl(current_action_dist) * loss_mask + mean_kl_loss = torch.sum(action_kl) / size_loss_mask + else: + mean_kl_loss = 0.0 + + # Compute value function loss. + delta = values_time_major - vtrace_adjusted_target_values + vf_loss = 0.5 * torch.sum(torch.pow(delta, 2.0) * loss_mask_time_major) + mean_vf_loss = vf_loss / size_loss_mask + + # Compute entropy loss. + mean_entropy_loss = ( + -torch.sum(current_action_dist.entropy() * loss_mask) / size_loss_mask + ) + + # The summed weighted loss. + total_loss = ( + mean_pi_loss + + (mean_vf_loss * config.vf_loss_coeff) + + ( + mean_entropy_loss + * self.entropy_coeff_schedulers_per_module[ + module_id + ].get_current_value() + ) + + (mean_kl_loss * self.curr_kl_coeffs_per_module[module_id]) + ) + + # Log important loss stats. + self.metrics.log_dict( + { + POLICY_LOSS_KEY: mean_pi_loss, + VF_LOSS_KEY: mean_vf_loss, + ENTROPY_KEY: -mean_entropy_loss, + LEARNER_RESULTS_KL_KEY: mean_kl_loss, + LEARNER_RESULTS_CURR_KL_COEFF_KEY: ( + self.curr_kl_coeffs_per_module[module_id] + ), + }, + key=module_id, + window=1, # <- single items (should not be mean/ema-reduced over time). + ) + # Return the total loss. + return total_loss + + @override(APPOLearner) + def _update_module_kl_coeff(self, module_id: ModuleID, config: APPOConfig) -> None: + # Update the current KL value based on the recently measured value. + # Increase. + kl = convert_to_numpy(self.metrics.peek((module_id, LEARNER_RESULTS_KL_KEY))) + kl_coeff_var = self.curr_kl_coeffs_per_module[module_id] + + if kl > 2.0 * config.kl_target: + # TODO (Kourosh) why not *2.0? + kl_coeff_var.data *= 1.5 + # Decrease. 
+ elif kl < 0.5 * config.kl_target: + kl_coeff_var.data *= 0.5 + + self.metrics.log_value( + (module_id, LEARNER_RESULTS_CURR_KL_COEFF_KEY), + kl_coeff_var.item(), + window=1, + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91b0cc69acd7db67d518bdf36f5fdbeef9e1b887 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__init__.py @@ -0,0 +1,8 @@ +from ray.rllib.algorithms.cql.cql import CQL, CQLConfig +from ray.rllib.algorithms.cql.cql_torch_policy import CQLTorchPolicy + +__all__ = [ + "CQL", + "CQLTorchPolicy", + "CQLConfig", +] diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f28b1f34621ec5670acfc89d563113ccb4695c9 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql_torch_policy.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql_torch_policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94e421e9e23ffa679ffb3907c9e8fda0690c9733 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql_torch_policy.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql.py new file mode 100644 index 0000000000000000000000000000000000000000..b5e97ff9058040d6ff85b634fe511af59404a700 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql.py @@ -0,0 +1,389 @@ +import logging +from typing import Optional, Type, Union + +from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided +from ray.rllib.algorithms.cql.cql_tf_policy import CQLTFPolicy +from ray.rllib.algorithms.cql.cql_torch_policy import CQLTorchPolicy +from ray.rllib.algorithms.sac.sac import ( + SAC, + SACConfig, +) +from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import ( + AddObservationsFromEpisodesToBatch, +) +from ray.rllib.connectors.learner.add_next_observations_from_episodes_to_train_batch import ( # noqa + AddNextObservationsFromEpisodesToTrainBatch, +) +from ray.rllib.core.learner.learner import Learner +from ray.rllib.core.rl_module.rl_module import RLModuleSpec +from ray.rllib.execution.rollout_ops import ( + synchronous_parallel_sample, +) +from ray.rllib.execution.train_ops import ( + multi_gpu_train_one_step, + train_one_step, +) +from ray.rllib.policy.policy import Policy +from ray.rllib.utils.annotations import OldAPIStack, override +from ray.rllib.utils.deprecation import ( + DEPRECATED_VALUE, + deprecation_warning, +) +from ray.rllib.utils.framework import try_import_tf, try_import_tfp +from ray.rllib.utils.metrics import ( + ALL_MODULES, + LEARNER_RESULTS, + LEARNER_UPDATE_TIMER, + LAST_TARGET_UPDATE_TS, + NUM_AGENT_STEPS_SAMPLED, + NUM_AGENT_STEPS_TRAINED, + NUM_ENV_STEPS_SAMPLED, + NUM_ENV_STEPS_TRAINED, + NUM_TARGET_UPDATES, + OFFLINE_SAMPLING_TIMER, + TARGET_NET_UPDATE_TIMER, + 
SYNCH_WORKER_WEIGHTS_TIMER, + SAMPLE_TIMER, + TIMERS, +) +from ray.rllib.utils.typing import ResultDict, RLModuleSpecType + +tf1, tf, tfv = try_import_tf() +tfp = try_import_tfp() +logger = logging.getLogger(__name__) + + +class CQLConfig(SACConfig): + """Defines a configuration class from which a CQL can be built. + + .. testcode:: + :skipif: True + + from ray.rllib.algorithms.cql import CQLConfig + config = CQLConfig().training(gamma=0.9, lr=0.01) + config = config.resources(num_gpus=0) + config = config.env_runners(num_env_runners=4) + print(config.to_dict()) + # Build a Algorithm object from the config and run 1 training iteration. + algo = config.build(env="CartPole-v1") + algo.train() + """ + + def __init__(self, algo_class=None): + super().__init__(algo_class=algo_class or CQL) + + # fmt: off + # __sphinx_doc_begin__ + # CQL-specific config settings: + self.bc_iters = 20000 + self.temperature = 1.0 + self.num_actions = 10 + self.lagrangian = False + self.lagrangian_thresh = 5.0 + self.min_q_weight = 5.0 + self.deterministic_backup = True + self.lr = 3e-4 + # Note, the new stack defines learning rates for each component. + # The base learning rate `lr` has to be set to `None`, if using + # the new stack. + self.actor_lr = 1e-4 + self.critic_lr = 1e-3 + self.alpha_lr = 1e-3 + + self.replay_buffer_config = { + "_enable_replay_buffer_api": True, + "type": "MultiAgentPrioritizedReplayBuffer", + "capacity": int(1e6), + # If True prioritized replay buffer will be used. + "prioritized_replay": False, + "prioritized_replay_alpha": 0.6, + "prioritized_replay_beta": 0.4, + "prioritized_replay_eps": 1e-6, + # Whether to compute priorities already on the remote worker side. + "worker_side_prioritization": False, + } + + # Changes to Algorithm's/SACConfig's default: + + # .reporting() + self.min_sample_timesteps_per_iteration = 0 + self.min_train_timesteps_per_iteration = 100 + # fmt: on + # __sphinx_doc_end__ + + self.timesteps_per_iteration = DEPRECATED_VALUE + + @override(SACConfig) + def training( + self, + *, + bc_iters: Optional[int] = NotProvided, + temperature: Optional[float] = NotProvided, + num_actions: Optional[int] = NotProvided, + lagrangian: Optional[bool] = NotProvided, + lagrangian_thresh: Optional[float] = NotProvided, + min_q_weight: Optional[float] = NotProvided, + deterministic_backup: Optional[bool] = NotProvided, + **kwargs, + ) -> "CQLConfig": + """Sets the training-related configuration. + + Args: + bc_iters: Number of iterations with Behavior Cloning pretraining. + temperature: CQL loss temperature. + num_actions: Number of actions to sample for CQL loss + lagrangian: Whether to use the Lagrangian for Alpha Prime (in CQL loss). + lagrangian_thresh: Lagrangian threshold. + min_q_weight: in Q weight multiplier. + deterministic_backup: If the target in the Bellman update should have an + entropy backup. Defaults to `True`. + + Returns: + This updated AlgorithmConfig object. + """ + # Pass kwargs onto super's `training()` method. 
+ super().training(**kwargs) + + if bc_iters is not NotProvided: + self.bc_iters = bc_iters + if temperature is not NotProvided: + self.temperature = temperature + if num_actions is not NotProvided: + self.num_actions = num_actions + if lagrangian is not NotProvided: + self.lagrangian = lagrangian + if lagrangian_thresh is not NotProvided: + self.lagrangian_thresh = lagrangian_thresh + if min_q_weight is not NotProvided: + self.min_q_weight = min_q_weight + if deterministic_backup is not NotProvided: + self.deterministic_backup = deterministic_backup + + return self + + @override(AlgorithmConfig) + def offline_data(self, **kwargs) -> "CQLConfig": + + super().offline_data(**kwargs) + + # Check, if the passed in class incorporates the `OfflinePreLearner` + # interface. + if "prelearner_class" in kwargs: + from ray.rllib.offline.offline_data import OfflinePreLearner + + if not issubclass(kwargs.get("prelearner_class"), OfflinePreLearner): + raise ValueError( + f"`prelearner_class` {kwargs.get('prelearner_class')} is not a " + "subclass of `OfflinePreLearner`. Any class passed to " + "`prelearner_class` needs to implement the interface given by " + "`OfflinePreLearner`." + ) + + return self + + @override(SACConfig) + def get_default_learner_class(self) -> Union[Type["Learner"], str]: + if self.framework_str == "torch": + from ray.rllib.algorithms.cql.torch.cql_torch_learner import CQLTorchLearner + + return CQLTorchLearner + else: + raise ValueError( + f"The framework {self.framework_str} is not supported. " + "Use `'torch'` instead." + ) + + @override(AlgorithmConfig) + def build_learner_connector( + self, + input_observation_space, + input_action_space, + device=None, + ): + pipeline = super().build_learner_connector( + input_observation_space=input_observation_space, + input_action_space=input_action_space, + device=device, + ) + + # Prepend the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece (right + # after the corresponding "add-OBS-..." default piece). + pipeline.insert_after( + AddObservationsFromEpisodesToBatch, + AddNextObservationsFromEpisodesToTrainBatch(), + ) + + return pipeline + + @override(SACConfig) + def validate(self) -> None: + # First check, whether old `timesteps_per_iteration` is used. + if self.timesteps_per_iteration != DEPRECATED_VALUE: + deprecation_warning( + old="timesteps_per_iteration", + new="min_train_timesteps_per_iteration", + error=True, + ) + + # Call super's validation method. + super().validate() + + # CQL-torch performs the optimizer steps inside the loss function. + # Using the multi-GPU optimizer will therefore not work (see multi-GPU + # check above) and we must use the simple optimizer for now. + if self.simple_optimizer is not True and self.framework_str == "torch": + self.simple_optimizer = True + + if self.framework_str in ["tf", "tf2"] and tfp is None: + logger.warning( + "You need `tensorflow_probability` in order to run CQL! " + "Install it via `pip install tensorflow_probability`. Your " + f"tf.__version__={tf.__version__ if tf else None}." + "Trying to import tfp results in the following error:" + ) + try_import_tfp(error=True) + + # Assert that for a local learner the number of iterations is 1. Note, + # this is needed because we have no iterators, but instead a single + # batch returned directly from the `OfflineData.sample` method. 
+ if ( + self.num_learners == 0 + and not self.dataset_num_iters_per_learner + and self.enable_rl_module_and_learner + ): + raise ValueError( + "When using a single local learner the number of iterations " + "per learner, `dataset_num_iters_per_learner` has to be defined. " + "Set this hyperparameter in the `AlgorithmConfig.offline_data`." + ) + + @override(SACConfig) + def get_default_rl_module_spec(self) -> RLModuleSpecType: + from ray.rllib.algorithms.sac.sac_catalog import SACCatalog + + if self.framework_str == "torch": + from ray.rllib.algorithms.cql.torch.cql_torch_rl_module import ( + CQLTorchRLModule, + ) + + return RLModuleSpec(module_class=CQLTorchRLModule, catalog_class=SACCatalog) + else: + raise ValueError( + f"The framework {self.framework_str} is not supported. " "Use `torch`." + ) + + @property + def _model_config_auto_includes(self): + return super()._model_config_auto_includes | { + "num_actions": self.num_actions, + } + + +class CQL(SAC): + """CQL (derived from SAC).""" + + @classmethod + @override(SAC) + def get_default_config(cls) -> AlgorithmConfig: + return CQLConfig() + + @classmethod + @override(SAC) + def get_default_policy_class( + cls, config: AlgorithmConfig + ) -> Optional[Type[Policy]]: + if config["framework"] == "torch": + return CQLTorchPolicy + else: + return CQLTFPolicy + + @override(SAC) + def training_step(self) -> None: + # Old API stack (Policy, RolloutWorker, Connector). + if not self.config.enable_env_runner_and_connector_v2: + return self._training_step_old_api_stack() + + # Sampling from offline data. + with self.metrics.log_time((TIMERS, OFFLINE_SAMPLING_TIMER)): + # Return an iterator in case we are using remote learners. + batch_or_iterator = self.offline_data.sample( + num_samples=self.config.train_batch_size_per_learner, + num_shards=self.config.num_learners, + return_iterator=self.config.num_learners > 1, + ) + + # Updating the policy. + with self.metrics.log_time((TIMERS, LEARNER_UPDATE_TIMER)): + # TODO (simon, sven): Check, if we should execute directly s.th. like + # `LearnerGroup.update_from_iterator()`. + learner_results = self.learner_group._update( + batch=batch_or_iterator, + minibatch_size=self.config.train_batch_size_per_learner, + num_iters=self.config.dataset_num_iters_per_learner, + ) + + # Log training results. + self.metrics.merge_and_log_n_dicts(learner_results, key=LEARNER_RESULTS) + + # Synchronize weights. + # As the results contain for each policy the loss and in addition the + # total loss over all policies is returned, this total loss has to be + # removed. + modules_to_update = set(learner_results[0].keys()) - {ALL_MODULES} + + # Update weights - after learning on the local worker - + # on all remote workers. Note, we only have the local `EnvRunner`, + # but from this `EnvRunner` the evaulation `EnvRunner`s get updated. + with self.metrics.log_time((TIMERS, SYNCH_WORKER_WEIGHTS_TIMER)): + self.env_runner_group.sync_weights( + # Sync weights from learner_group to all EnvRunners. + from_worker_or_learner_group=self.learner_group, + policies=modules_to_update, + inference_only=True, + ) + + @OldAPIStack + def _training_step_old_api_stack(self) -> ResultDict: + # Collect SampleBatches from sample workers. 
+ with self._timers[SAMPLE_TIMER]: + train_batch = synchronous_parallel_sample(worker_set=self.env_runner_group) + train_batch = train_batch.as_multi_agent() + self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps() + self._counters[NUM_ENV_STEPS_SAMPLED] += train_batch.env_steps() + + # Postprocess batch before we learn on it. + post_fn = self.config.get("before_learn_on_batch") or (lambda b, *a: b) + train_batch = post_fn(train_batch, self.env_runner_group, self.config) + + # Learn on training batch. + # Use simple optimizer (only for multi-agent or tf-eager; all other + # cases should use the multi-GPU optimizer, even if only using 1 GPU) + if self.config.get("simple_optimizer") is True: + train_results = train_one_step(self, train_batch) + else: + train_results = multi_gpu_train_one_step(self, train_batch) + + # Update target network every `target_network_update_freq` training steps. + cur_ts = self._counters[ + NUM_AGENT_STEPS_TRAINED + if self.config.count_steps_by == "agent_steps" + else NUM_ENV_STEPS_TRAINED + ] + last_update = self._counters[LAST_TARGET_UPDATE_TS] + if cur_ts - last_update >= self.config.target_network_update_freq: + with self._timers[TARGET_NET_UPDATE_TIMER]: + to_update = self.env_runner.get_policies_to_train() + self.env_runner.foreach_policy_to_train( + lambda p, pid: pid in to_update and p.update_target() + ) + self._counters[NUM_TARGET_UPDATES] += 1 + self._counters[LAST_TARGET_UPDATE_TS] = cur_ts + + # Update remote workers's weights after learning on local worker + # (only those policies that were actually trained). + if self.env_runner_group.num_remote_workers() > 0: + with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: + self.env_runner_group.sync_weights(policies=list(train_results.keys())) + + # Return all collected metrics for the iteration. + return train_results diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql_tf_policy.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql_tf_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..2aaecf01e2be0d17b1bd58c56efc170c7be93757 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql_tf_policy.py @@ -0,0 +1,426 @@ +""" +TensorFlow policy class used for CQL. 
+""" +from functools import partial +import numpy as np +import gymnasium as gym +import logging +import tree +from typing import Dict, List, Type, Union + +import ray +import ray.experimental.tf_utils +from ray.rllib.algorithms.sac.sac_tf_policy import ( + apply_gradients as sac_apply_gradients, + compute_and_clip_gradients as sac_compute_and_clip_gradients, + get_distribution_inputs_and_class, + _get_dist_class, + build_sac_model, + postprocess_trajectory, + setup_late_mixins, + stats, + validate_spaces, + ActorCriticOptimizerMixin as SACActorCriticOptimizerMixin, + ComputeTDErrorMixin, +) +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.tf_action_dist import TFActionDistribution +from ray.rllib.policy.tf_mixins import TargetNetworkMixin +from ray.rllib.policy.tf_policy_template import build_tf_policy +from ray.rllib.policy.policy import Policy +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.exploration.random import Random +from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_tfp +from ray.rllib.utils.typing import ( + LocalOptimizer, + ModelGradients, + TensorType, + AlgorithmConfigDict, +) + +tf1, tf, tfv = try_import_tf() +tfp = try_import_tfp() + +logger = logging.getLogger(__name__) + +MEAN_MIN = -9.0 +MEAN_MAX = 9.0 + + +def _repeat_tensor(t: TensorType, n: int): + # Insert new axis at position 1 into tensor t + t_rep = tf.expand_dims(t, 1) + # Repeat tensor t_rep along new axis n times + multiples = tf.concat([[1, n], tf.tile([1], tf.expand_dims(tf.rank(t) - 1, 0))], 0) + t_rep = tf.tile(t_rep, multiples) + # Merge new axis into batch axis + t_rep = tf.reshape(t_rep, tf.concat([[-1], tf.shape(t)[1:]], 0)) + return t_rep + + +# Returns policy tiled actions and log probabilities for CQL Loss +def policy_actions_repeat(model, action_dist, obs, num_repeat=1): + batch_size = tf.shape(tree.flatten(obs)[0])[0] + obs_temp = tree.map_structure(lambda t: _repeat_tensor(t, num_repeat), obs) + logits, _ = model.get_action_model_outputs(obs_temp) + policy_dist = action_dist(logits, model) + actions, logp_ = policy_dist.sample_logp() + logp = tf.expand_dims(logp_, -1) + return actions, tf.reshape(logp, [batch_size, num_repeat, 1]) + + +def q_values_repeat(model, obs, actions, twin=False): + action_shape = tf.shape(actions)[0] + obs_shape = tf.shape(tree.flatten(obs)[0])[0] + num_repeat = action_shape // obs_shape + obs_temp = tree.map_structure(lambda t: _repeat_tensor(t, num_repeat), obs) + if not twin: + preds_, _ = model.get_q_values(obs_temp, actions) + else: + preds_, _ = model.get_twin_q_values(obs_temp, actions) + preds = tf.reshape(preds_, [obs_shape, num_repeat, 1]) + return preds + + +def cql_loss( + policy: Policy, + model: ModelV2, + dist_class: Type[TFActionDistribution], + train_batch: SampleBatch, +) -> Union[TensorType, List[TensorType]]: + logger.info(f"Current iteration = {policy.cur_iter}") + policy.cur_iter += 1 + + # For best performance, turn deterministic off + deterministic = policy.config["_deterministic_loss"] + assert not deterministic + twin_q = policy.config["twin_q"] + discount = policy.config["gamma"] + + # CQL Parameters + bc_iters = policy.config["bc_iters"] + cql_temp = policy.config["temperature"] + num_actions = policy.config["num_actions"] + min_q_weight = policy.config["min_q_weight"] + use_lagrange = policy.config["lagrangian"] + target_action_gap = policy.config["lagrangian_thresh"] + + obs = train_batch[SampleBatch.CUR_OBS] + actions = tf.cast(train_batch[SampleBatch.ACTIONS], 
tf.float32) + rewards = tf.cast(train_batch[SampleBatch.REWARDS], tf.float32) + next_obs = train_batch[SampleBatch.NEXT_OBS] + terminals = train_batch[SampleBatch.TERMINATEDS] + + model_out_t, _ = model(SampleBatch(obs=obs, _is_training=True), [], None) + + model_out_tp1, _ = model(SampleBatch(obs=next_obs, _is_training=True), [], None) + + target_model_out_tp1, _ = policy.target_model( + SampleBatch(obs=next_obs, _is_training=True), [], None + ) + + action_dist_class = _get_dist_class(policy, policy.config, policy.action_space) + action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t) + action_dist_t = action_dist_class(action_dist_inputs_t, model) + policy_t, log_pis_t = action_dist_t.sample_logp() + log_pis_t = tf.expand_dims(log_pis_t, -1) + + # Unlike original SAC, Alpha and Actor Loss are computed first. + # Alpha Loss + alpha_loss = -tf.reduce_mean( + model.log_alpha * tf.stop_gradient(log_pis_t + model.target_entropy) + ) + + # Policy Loss (Either Behavior Clone Loss or SAC Loss) + alpha = tf.math.exp(model.log_alpha) + if policy.cur_iter >= bc_iters: + min_q, _ = model.get_q_values(model_out_t, policy_t) + if twin_q: + twin_q_, _ = model.get_twin_q_values(model_out_t, policy_t) + min_q = tf.math.minimum(min_q, twin_q_) + actor_loss = tf.reduce_mean(tf.stop_gradient(alpha) * log_pis_t - min_q) + else: + bc_logp = action_dist_t.logp(actions) + actor_loss = tf.reduce_mean(tf.stop_gradient(alpha) * log_pis_t - bc_logp) + # actor_loss = -tf.reduce_mean(bc_logp) + + # Critic Loss (Standard SAC Critic L2 Loss + CQL Entropy Loss) + # SAC Loss: + # Q-values for the batched actions. + action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1) + action_dist_tp1 = action_dist_class(action_dist_inputs_tp1, model) + policy_tp1, _ = action_dist_tp1.sample_logp() + + q_t, _ = model.get_q_values(model_out_t, actions) + q_t_selected = tf.squeeze(q_t, axis=-1) + if twin_q: + twin_q_t, _ = model.get_twin_q_values(model_out_t, actions) + twin_q_t_selected = tf.squeeze(twin_q_t, axis=-1) + + # Target q network evaluation. + q_tp1, _ = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1) + if twin_q: + twin_q_tp1, _ = policy.target_model.get_twin_q_values( + target_model_out_tp1, policy_tp1 + ) + # Take min over both twin-NNs. 
+ q_tp1 = tf.math.minimum(q_tp1, twin_q_tp1) + + q_tp1_best = tf.squeeze(input=q_tp1, axis=-1) + q_tp1_best_masked = (1.0 - tf.cast(terminals, tf.float32)) * q_tp1_best + + # compute RHS of bellman equation + q_t_target = tf.stop_gradient( + rewards + (discount ** policy.config["n_step"]) * q_tp1_best_masked + ) + + # Compute the TD-error (potentially clipped), for priority replay buffer + base_td_error = tf.math.abs(q_t_selected - q_t_target) + if twin_q: + twin_td_error = tf.math.abs(twin_q_t_selected - q_t_target) + td_error = 0.5 * (base_td_error + twin_td_error) + else: + td_error = base_td_error + + critic_loss_1 = tf.keras.losses.MSE(q_t_selected, q_t_target) + if twin_q: + critic_loss_2 = tf.keras.losses.MSE(twin_q_t_selected, q_t_target) + + # CQL Loss (We are using Entropy version of CQL (the best version)) + rand_actions, _ = policy._random_action_generator.get_exploration_action( + action_distribution=action_dist_class( + tf.tile(action_dist_tp1.inputs, (num_actions, 1)), model + ), + timestep=0, + explore=True, + ) + curr_actions, curr_logp = policy_actions_repeat( + model, action_dist_class, model_out_t, num_actions + ) + next_actions, next_logp = policy_actions_repeat( + model, action_dist_class, model_out_tp1, num_actions + ) + + q1_rand = q_values_repeat(model, model_out_t, rand_actions) + q1_curr_actions = q_values_repeat(model, model_out_t, curr_actions) + q1_next_actions = q_values_repeat(model, model_out_t, next_actions) + + if twin_q: + q2_rand = q_values_repeat(model, model_out_t, rand_actions, twin=True) + q2_curr_actions = q_values_repeat(model, model_out_t, curr_actions, twin=True) + q2_next_actions = q_values_repeat(model, model_out_t, next_actions, twin=True) + + random_density = np.log(0.5 ** int(curr_actions.shape[-1])) + cat_q1 = tf.concat( + [ + q1_rand - random_density, + q1_next_actions - tf.stop_gradient(next_logp), + q1_curr_actions - tf.stop_gradient(curr_logp), + ], + 1, + ) + if twin_q: + cat_q2 = tf.concat( + [ + q2_rand - random_density, + q2_next_actions - tf.stop_gradient(next_logp), + q2_curr_actions - tf.stop_gradient(curr_logp), + ], + 1, + ) + + min_qf1_loss_ = ( + tf.reduce_mean(tf.reduce_logsumexp(cat_q1 / cql_temp, axis=1)) + * min_q_weight + * cql_temp + ) + min_qf1_loss = min_qf1_loss_ - (tf.reduce_mean(q_t) * min_q_weight) + if twin_q: + min_qf2_loss_ = ( + tf.reduce_mean(tf.reduce_logsumexp(cat_q2 / cql_temp, axis=1)) + * min_q_weight + * cql_temp + ) + min_qf2_loss = min_qf2_loss_ - (tf.reduce_mean(twin_q_t) * min_q_weight) + + if use_lagrange: + alpha_prime = tf.clip_by_value(model.log_alpha_prime.exp(), 0.0, 1000000.0)[0] + min_qf1_loss = alpha_prime * (min_qf1_loss - target_action_gap) + if twin_q: + min_qf2_loss = alpha_prime * (min_qf2_loss - target_action_gap) + alpha_prime_loss = 0.5 * (-min_qf1_loss - min_qf2_loss) + else: + alpha_prime_loss = -min_qf1_loss + + cql_loss = [min_qf1_loss] + if twin_q: + cql_loss.append(min_qf2_loss) + + critic_loss = [critic_loss_1 + min_qf1_loss] + if twin_q: + critic_loss.append(critic_loss_2 + min_qf2_loss) + + # Save for stats function. 
+ policy.q_t = q_t_selected + policy.policy_t = policy_t + policy.log_pis_t = log_pis_t + policy.td_error = td_error + policy.actor_loss = actor_loss + policy.critic_loss = critic_loss + policy.alpha_loss = alpha_loss + policy.log_alpha_value = model.log_alpha + policy.alpha_value = alpha + policy.target_entropy = model.target_entropy + # CQL Stats + policy.cql_loss = cql_loss + if use_lagrange: + policy.log_alpha_prime_value = model.log_alpha_prime[0] + policy.alpha_prime_value = alpha_prime + policy.alpha_prime_loss = alpha_prime_loss + + # Return all loss terms corresponding to our optimizers. + if use_lagrange: + return actor_loss + tf.math.add_n(critic_loss) + alpha_loss + alpha_prime_loss + return actor_loss + tf.math.add_n(critic_loss) + alpha_loss + + +def cql_stats(policy: Policy, train_batch: SampleBatch) -> Dict[str, TensorType]: + sac_dict = stats(policy, train_batch) + sac_dict["cql_loss"] = tf.reduce_mean(tf.stack(policy.cql_loss)) + if policy.config["lagrangian"]: + sac_dict["log_alpha_prime_value"] = policy.log_alpha_prime_value + sac_dict["alpha_prime_value"] = policy.alpha_prime_value + sac_dict["alpha_prime_loss"] = policy.alpha_prime_loss + return sac_dict + + +class ActorCriticOptimizerMixin(SACActorCriticOptimizerMixin): + def __init__(self, config): + super().__init__(config) + if config["lagrangian"]: + # Eager mode. + if config["framework"] == "tf2": + self._alpha_prime_optimizer = tf.keras.optimizers.Adam( + learning_rate=config["optimization"]["critic_learning_rate"] + ) + # Static graph mode. + else: + self._alpha_prime_optimizer = tf1.train.AdamOptimizer( + learning_rate=config["optimization"]["critic_learning_rate"] + ) + + +def setup_early_mixins( + policy: Policy, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + config: AlgorithmConfigDict, +) -> None: + """Call mixin classes' constructors before Policy's initialization. + + Adds the necessary optimizers to the given Policy. + + Args: + policy: The Policy object. + obs_space (gym.spaces.Space): The Policy's observation space. + action_space (gym.spaces.Space): The Policy's action space. + config: The Policy's config. + """ + policy.cur_iter = 0 + ActorCriticOptimizerMixin.__init__(policy, config) + if config["lagrangian"]: + policy.model.log_alpha_prime = get_variable( + 0.0, framework="tf", trainable=True, tf_name="log_alpha_prime" + ) + policy.alpha_prime_optim = tf.keras.optimizers.Adam( + learning_rate=config["optimization"]["critic_learning_rate"], + ) + # Generic random action generator for calculating CQL-loss. + policy._random_action_generator = Random( + action_space, + model=None, + framework="tf2", + policy_config=config, + num_workers=0, + worker_index=0, + ) + + +def compute_gradients_fn( + policy: Policy, optimizer: LocalOptimizer, loss: TensorType +) -> ModelGradients: + grads_and_vars = sac_compute_and_clip_gradients(policy, optimizer, loss) + + if policy.config["lagrangian"]: + # Eager: Use GradientTape (which is a property of the `optimizer` + # object (an OptimizerWrapper): see rllib/policy/eager_tf_policy.py). 
+ if policy.config["framework"] == "tf2": + tape = optimizer.tape + log_alpha_prime = [policy.model.log_alpha_prime] + alpha_prime_grads_and_vars = list( + zip( + tape.gradient(policy.alpha_prime_loss, log_alpha_prime), + log_alpha_prime, + ) + ) + # Tf1.x: Use optimizer.compute_gradients() + else: + alpha_prime_grads_and_vars = ( + policy._alpha_prime_optimizer.compute_gradients( + policy.alpha_prime_loss, var_list=[policy.model.log_alpha_prime] + ) + ) + + # Clip if necessary. + if policy.config["grad_clip"]: + clip_func = partial(tf.clip_by_norm, clip_norm=policy.config["grad_clip"]) + else: + clip_func = tf.identity + + # Save grads and vars for later use in `build_apply_op`. + policy._alpha_prime_grads_and_vars = [ + (clip_func(g), v) for (g, v) in alpha_prime_grads_and_vars if g is not None + ] + + grads_and_vars += policy._alpha_prime_grads_and_vars + return grads_and_vars + + +def apply_gradients_fn(policy, optimizer, grads_and_vars): + sac_results = sac_apply_gradients(policy, optimizer, grads_and_vars) + + if policy.config["lagrangian"]: + # Eager mode -> Just apply and return None. + if policy.config["framework"] == "tf2": + policy._alpha_prime_optimizer.apply_gradients( + policy._alpha_prime_grads_and_vars + ) + return + # Tf static graph -> Return grouped op. + else: + alpha_prime_apply_op = policy._alpha_prime_optimizer.apply_gradients( + policy._alpha_prime_grads_and_vars, + global_step=tf1.train.get_or_create_global_step(), + ) + return tf.group([sac_results, alpha_prime_apply_op]) + return sac_results + + +# Build a child class of `TFPolicy`, given the custom functions defined +# above. +CQLTFPolicy = build_tf_policy( + name="CQLTFPolicy", + loss_fn=cql_loss, + get_default_config=lambda: ray.rllib.algorithms.cql.cql.CQLConfig(), + validate_spaces=validate_spaces, + stats_fn=cql_stats, + postprocess_fn=postprocess_trajectory, + before_init=setup_early_mixins, + after_init=setup_late_mixins, + make_model=build_sac_model, + extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error}, + mixins=[ActorCriticOptimizerMixin, TargetNetworkMixin, ComputeTDErrorMixin], + action_distribution_fn=get_distribution_inputs_and_class, + compute_gradients_fn=compute_gradients_fn, + apply_gradients_fn=apply_gradients_fn, +) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_learner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_learner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0fd2445754b887ca71488208d9c56870604da32 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_learner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_rl_module.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_rl_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19d0519da38773e0f75353127cd74ec1278e4dba Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_rl_module.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..f6046b585028664e7351193d3ece2e291aa78278 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__init__.py @@ -0,0 +1,10 @@ +from ray.rllib.algorithms.dqn.dqn import DQN, DQNConfig +from ray.rllib.algorithms.dqn.dqn_tf_policy import DQNTFPolicy +from ray.rllib.algorithms.dqn.dqn_torch_policy import DQNTorchPolicy + +__all__ = [ + "DQN", + "DQNConfig", + "DQNTFPolicy", + "DQNTorchPolicy", +] diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_catalog.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_catalog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67d618345eb7bbf3da23440e31c2d6685b6d1203 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_catalog.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_learner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_learner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a4835c277afdb2188f9cb15c132045f8850de85 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_learner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_torch_policy.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_torch_policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d6ba665be40c5ce8d76b954b1f021ebe9b3561 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_torch_policy.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/learner_thread.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/learner_thread.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec77f9ce8d0a8e5fa9423a091b5424978e50d42c Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/learner_thread.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn.py new file mode 100644 index 0000000000000000000000000000000000000000..2c5e95602bdbe360fac09922ad8f0b7d4e9b8b10 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn.py @@ -0,0 +1,862 @@ +""" +Deep Q-Networks (DQN, Rainbow, Parametric DQN) +============================================== + +This file defines the distributed Algorithm class for the Deep Q-Networks +algorithm. See `dqn_[tf|torch]_policy.py` for the definition of the policies. 
+ +Detailed documentation: +https://docs.ray.io/en/master/rllib-algorithms.html#deep-q-networks-dqn-rainbow-parametric-dqn +""" # noqa: E501 + +from collections import defaultdict +import logging +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +import numpy as np + +from ray.rllib.algorithms.algorithm import Algorithm +from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided +from ray.rllib.algorithms.dqn.dqn_tf_policy import DQNTFPolicy +from ray.rllib.algorithms.dqn.dqn_torch_policy import DQNTorchPolicy +from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import ( + AddObservationsFromEpisodesToBatch, +) +from ray.rllib.connectors.learner.add_next_observations_from_episodes_to_train_batch import ( # noqa + AddNextObservationsFromEpisodesToTrainBatch, +) +from ray.rllib.core.learner import Learner +from ray.rllib.core.rl_module.rl_module import RLModuleSpec +from ray.rllib.execution.rollout_ops import ( + synchronous_parallel_sample, +) +from ray.rllib.policy.sample_batch import MultiAgentBatch +from ray.rllib.execution.train_ops import ( + train_one_step, + multi_gpu_train_one_step, +) +from ray.rllib.policy.policy import Policy +from ray.rllib.utils import deep_update +from ray.rllib.utils.annotations import override +from ray.rllib.utils.numpy import convert_to_numpy +from ray.rllib.utils.replay_buffers.utils import ( + update_priorities_in_episode_replay_buffer, + update_priorities_in_replay_buffer, + validate_buffer_config, +) +from ray.rllib.utils.typing import ResultDict +from ray.rllib.utils.metrics import ( + ALL_MODULES, + ENV_RUNNER_RESULTS, + ENV_RUNNER_SAMPLING_TIMER, + LAST_TARGET_UPDATE_TS, + LEARNER_RESULTS, + LEARNER_UPDATE_TIMER, + NUM_AGENT_STEPS_SAMPLED, + NUM_AGENT_STEPS_SAMPLED_LIFETIME, + NUM_ENV_STEPS_SAMPLED, + NUM_ENV_STEPS_SAMPLED_LIFETIME, + NUM_TARGET_UPDATES, + REPLAY_BUFFER_ADD_DATA_TIMER, + REPLAY_BUFFER_SAMPLE_TIMER, + REPLAY_BUFFER_UPDATE_PRIOS_TIMER, + SAMPLE_TIMER, + SYNCH_WORKER_WEIGHTS_TIMER, + TD_ERROR_KEY, + TIMERS, +) +from ray.rllib.utils.deprecation import DEPRECATED_VALUE +from ray.rllib.utils.replay_buffers.utils import sample_min_n_steps_from_buffer +from ray.rllib.utils.typing import ( + LearningRateOrSchedule, + RLModuleSpecType, + SampleBatchType, +) + +logger = logging.getLogger(__name__) + + +class DQNConfig(AlgorithmConfig): + r"""Defines a configuration class from which a DQN Algorithm can be built. + + .. testcode:: + + from ray.rllib.algorithms.dqn.dqn import DQNConfig + + config = ( + DQNConfig() + .environment("CartPole-v1") + .training(replay_buffer_config={ + "type": "PrioritizedEpisodeReplayBuffer", + "capacity": 60000, + "alpha": 0.5, + "beta": 0.5, + }) + .env_runners(num_env_runners=1) + ) + algo = config.build() + algo.train() + algo.stop() + + .. testcode:: + + from ray.rllib.algorithms.dqn.dqn import DQNConfig + from ray import air + from ray import tune + + config = ( + DQNConfig() + .environment("CartPole-v1") + .training( + num_atoms=tune.grid_search([1,]) + ) + ) + tune.Tuner( + "DQN", + run_config=air.RunConfig(stop={"training_iteration":1}), + param_space=config, + ).fit() + + .. testoutput:: + :hide: + + ... 
+ + + """ + + def __init__(self, algo_class=None): + """Initializes a DQNConfig instance.""" + self.exploration_config = { + "type": "EpsilonGreedy", + "initial_epsilon": 1.0, + "final_epsilon": 0.02, + "epsilon_timesteps": 10000, + } + + super().__init__(algo_class=algo_class or DQN) + + # Overrides of AlgorithmConfig defaults + # `env_runners()` + # Set to `self.n_step`, if 'auto'. + self.rollout_fragment_length: Union[int, str] = "auto" + # New stack uses `epsilon` as either a constant value or a scheduler + # defined like this. + # TODO (simon): Ensure that users can understand how to provide epsilon. + # (sven): Should we add this to `self.env_runners(epsilon=..)`? + self.epsilon = [(0, 1.0), (10000, 0.05)] + + # `training()` + self.grad_clip = 40.0 + # Note: Only when using enable_rl_module_and_learner=True can the clipping mode + # be configured by the user. On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. + self.grad_clip_by = "global_norm" + self.lr = 5e-4 + self.train_batch_size = 32 + + # `evaluation()` + self.evaluation(evaluation_config=AlgorithmConfig.overrides(explore=False)) + + # `reporting()` + self.min_time_s_per_iteration = None + self.min_sample_timesteps_per_iteration = 1000 + + # DQN specific config settings. + # fmt: off + # __sphinx_doc_begin__ + self.target_network_update_freq = 500 + self.num_steps_sampled_before_learning_starts = 1000 + self.store_buffer_in_checkpoints = False + self.adam_epsilon = 1e-8 + + self.tau = 1.0 + + self.num_atoms = 1 + self.v_min = -10.0 + self.v_max = 10.0 + self.noisy = False + self.sigma0 = 0.5 + self.dueling = True + self.hiddens = [256] + self.double_q = True + self.n_step = 1 + self.before_learn_on_batch = None + self.training_intensity = None + self.td_error_loss_fn = "huber" + self.categorical_distribution_temperature = 1.0 + + # Replay buffer configuration. + self.replay_buffer_config = { + "type": "PrioritizedEpisodeReplayBuffer", + # Size of the replay buffer. Note that if async_updates is set, + # then each worker will have a replay buffer of this size. + "capacity": 50000, + "alpha": 0.6, + # Beta parameter for sampling from prioritized replay buffer. 
+ "beta": 0.4, + } + # fmt: on + # __sphinx_doc_end__ + + self.lr_schedule = None # @OldAPIStack + + # Deprecated + self.buffer_size = DEPRECATED_VALUE + self.prioritized_replay = DEPRECATED_VALUE + self.learning_starts = DEPRECATED_VALUE + self.replay_batch_size = DEPRECATED_VALUE + # Can not use DEPRECATED_VALUE here because -1 is a common config value + self.replay_sequence_length = None + self.prioritized_replay_alpha = DEPRECATED_VALUE + self.prioritized_replay_beta = DEPRECATED_VALUE + self.prioritized_replay_eps = DEPRECATED_VALUE + + @override(AlgorithmConfig) + def training( + self, + *, + target_network_update_freq: Optional[int] = NotProvided, + replay_buffer_config: Optional[dict] = NotProvided, + store_buffer_in_checkpoints: Optional[bool] = NotProvided, + lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided, + epsilon: Optional[LearningRateOrSchedule] = NotProvided, + adam_epsilon: Optional[float] = NotProvided, + grad_clip: Optional[int] = NotProvided, + num_steps_sampled_before_learning_starts: Optional[int] = NotProvided, + tau: Optional[float] = NotProvided, + num_atoms: Optional[int] = NotProvided, + v_min: Optional[float] = NotProvided, + v_max: Optional[float] = NotProvided, + noisy: Optional[bool] = NotProvided, + sigma0: Optional[float] = NotProvided, + dueling: Optional[bool] = NotProvided, + hiddens: Optional[int] = NotProvided, + double_q: Optional[bool] = NotProvided, + n_step: Optional[Union[int, Tuple[int, int]]] = NotProvided, + before_learn_on_batch: Callable[ + [Type[MultiAgentBatch], List[Type[Policy]], Type[int]], + Type[MultiAgentBatch], + ] = NotProvided, + training_intensity: Optional[float] = NotProvided, + td_error_loss_fn: Optional[str] = NotProvided, + categorical_distribution_temperature: Optional[float] = NotProvided, + **kwargs, + ) -> "DQNConfig": + """Sets the training related configuration. + + Args: + target_network_update_freq: Update the target network every + `target_network_update_freq` sample steps. + replay_buffer_config: Replay buffer config. + Examples: + { + "_enable_replay_buffer_api": True, + "type": "MultiAgentReplayBuffer", + "capacity": 50000, + "replay_sequence_length": 1, + } + - OR - + { + "_enable_replay_buffer_api": True, + "type": "MultiAgentPrioritizedReplayBuffer", + "capacity": 50000, + "prioritized_replay_alpha": 0.6, + "prioritized_replay_beta": 0.4, + "prioritized_replay_eps": 1e-6, + "replay_sequence_length": 1, + } + - Where - + prioritized_replay_alpha: Alpha parameter controls the degree of + prioritization in the buffer. In other words, when a buffer sample has + a higher temporal-difference error, with how much more probability + should it drawn to use to update the parametrized Q-network. 0.0 + corresponds to uniform probability. Setting much above 1.0 may quickly + result as the sampling distribution could become heavily “pointy” with + low entropy. + prioritized_replay_beta: Beta parameter controls the degree of + importance sampling which suppresses the influence of gradient updates + from samples that have higher probability of being sampled via alpha + parameter and the temporal-difference error. + prioritized_replay_eps: Epsilon parameter sets the baseline probability + for sampling so that when the temporal-difference error of a sample is + zero, there is still a chance of drawing the sample. + store_buffer_in_checkpoints: Set this to True, if you want the contents of + your buffer(s) to be stored in any saved checkpoints as well. 
+ Warnings will be created if: + - This is True AND restoring from a checkpoint that contains no buffer + data. + - This is False AND restoring from a checkpoint that does contain + buffer data. + epsilon: Epsilon exploration schedule. In the format of [[timestep, value], + [timestep, value], ...]. A schedule must start from + timestep 0. + adam_epsilon: Adam optimizer's epsilon hyper parameter. + grad_clip: If not None, clip gradients during optimization at this value. + num_steps_sampled_before_learning_starts: Number of timesteps to collect + from rollout workers before we start sampling from replay buffers for + learning. Whether we count this in agent steps or environment steps + depends on config.multi_agent(count_steps_by=..). + tau: Update the target by \tau * policy + (1-\tau) * target_policy. + num_atoms: Number of atoms for representing the distribution of return. + When this is greater than 1, distributional Q-learning is used. + v_min: Minimum value estimation + v_max: Maximum value estimation + noisy: Whether to use noisy network to aid exploration. This adds parametric + noise to the model weights. + sigma0: Control the initial parameter noise for noisy nets. + dueling: Whether to use dueling DQN. + hiddens: Dense-layer setup for each the advantage branch and the value + branch + double_q: Whether to use double DQN. + n_step: N-step target updates. If >1, sars' tuples in trajectories will be + postprocessed to become sa[discounted sum of R][s t+n] tuples. An + integer will be interpreted as a fixed n-step value. If a tuple of 2 + ints is provided here, the n-step value will be drawn for each sample(!) + in the train batch from a uniform distribution over the closed interval + defined by `[n_step[0], n_step[1]]`. + before_learn_on_batch: Callback to run before learning on a multi-agent + batch of experiences. + training_intensity: The intensity with which to update the model (vs + collecting samples from the env). + If None, uses "natural" values of: + `train_batch_size` / (`rollout_fragment_length` x `num_env_runners` x + `num_envs_per_env_runner`). + If not None, will make sure that the ratio between timesteps inserted + into and sampled from the buffer matches the given values. + Example: + training_intensity=1000.0 + train_batch_size=250 + rollout_fragment_length=1 + num_env_runners=1 (or 0) + num_envs_per_env_runner=1 + -> natural value = 250 / 1 = 250.0 + -> will make sure that replay+train op will be executed 4x asoften as + rollout+insert op (4 * 250 = 1000). + See: rllib/algorithms/dqn/dqn.py::calculate_rr_weights for further + details. + td_error_loss_fn: "huber" or "mse". loss function for calculating TD error + when num_atoms is 1. Note that if num_atoms is > 1, this parameter + is simply ignored, and softmax cross entropy loss will be used. + categorical_distribution_temperature: Set the temperature parameter used + by Categorical action distribution. A valid temperature is in the range + of [0, 1]. Note that this mostly affects evaluation since TD error uses + argmax for return calculation. + + Returns: + This updated AlgorithmConfig object. + """ + # Pass kwargs onto super's `training()` method. + super().training(**kwargs) + + if target_network_update_freq is not NotProvided: + self.target_network_update_freq = target_network_update_freq + if replay_buffer_config is not NotProvided: + # Override entire `replay_buffer_config` if `type` key changes. + # Update, if `type` key remains the same or is not specified. 
+ new_replay_buffer_config = deep_update( + {"replay_buffer_config": self.replay_buffer_config}, + {"replay_buffer_config": replay_buffer_config}, + False, + ["replay_buffer_config"], + ["replay_buffer_config"], + ) + self.replay_buffer_config = new_replay_buffer_config["replay_buffer_config"] + if store_buffer_in_checkpoints is not NotProvided: + self.store_buffer_in_checkpoints = store_buffer_in_checkpoints + if lr_schedule is not NotProvided: + self.lr_schedule = lr_schedule + if epsilon is not NotProvided: + self.epsilon = epsilon + if adam_epsilon is not NotProvided: + self.adam_epsilon = adam_epsilon + if grad_clip is not NotProvided: + self.grad_clip = grad_clip + if num_steps_sampled_before_learning_starts is not NotProvided: + self.num_steps_sampled_before_learning_starts = ( + num_steps_sampled_before_learning_starts + ) + if tau is not NotProvided: + self.tau = tau + if num_atoms is not NotProvided: + self.num_atoms = num_atoms + if v_min is not NotProvided: + self.v_min = v_min + if v_max is not NotProvided: + self.v_max = v_max + if noisy is not NotProvided: + self.noisy = noisy + if sigma0 is not NotProvided: + self.sigma0 = sigma0 + if dueling is not NotProvided: + self.dueling = dueling + if hiddens is not NotProvided: + self.hiddens = hiddens + if double_q is not NotProvided: + self.double_q = double_q + if n_step is not NotProvided: + self.n_step = n_step + if before_learn_on_batch is not NotProvided: + self.before_learn_on_batch = before_learn_on_batch + if training_intensity is not NotProvided: + self.training_intensity = training_intensity + if td_error_loss_fn is not NotProvided: + self.td_error_loss_fn = td_error_loss_fn + if categorical_distribution_temperature is not NotProvided: + self.categorical_distribution_temperature = ( + categorical_distribution_temperature + ) + + return self + + @override(AlgorithmConfig) + def validate(self) -> None: + # Call super's validation method. + super().validate() + + # Warn about new API stack on by default. + if self.enable_rl_module_and_learner: + logger.warning( + f"You are running {self.algo_class.__name__} on the new API stack! " + "This is the new default behavior for this algorithm. If you don't " + "want to use the new API stack, set `config.api_stack(" + "enable_rl_module_and_learner=False," + "enable_env_runner_and_connector_v2=False)`. For a detailed migration " + "guide, see here: https://docs.ray.io/en/master/rllib/new-api-stack-migration-guide.html" # noqa + ) + + if ( + not self.enable_rl_module_and_learner + and self.exploration_config["type"] == "ParameterNoise" + ): + if self.batch_mode != "complete_episodes": + raise ValueError( + "ParameterNoise Exploration requires `batch_mode` to be " + "'complete_episodes'. Try setting `config.env_runners(" + "batch_mode='complete_episodes')`." + ) + + if not self.enable_env_runner_and_connector_v2 and not self.in_evaluation: + validate_buffer_config(self) + + if self.td_error_loss_fn not in ["huber", "mse"]: + raise ValueError("`td_error_loss_fn` must be 'huber' or 'mse'!") + + # Check rollout_fragment_length to be compatible with n_step. + if ( + not self.in_evaluation + and self.rollout_fragment_length != "auto" + and self.rollout_fragment_length < self.n_step + ): + raise ValueError( + f"Your `rollout_fragment_length` ({self.rollout_fragment_length}) is " + f"smaller than `n_step` ({self.n_step})! " + "Try setting config.env_runners(rollout_fragment_length=" + f"{self.n_step})." 
+ ) + + # TODO (simon): Find a clean solution to deal with + # configuration configs when using the new API stack. + if ( + not self.enable_rl_module_and_learner + and self.exploration_config["type"] == "ParameterNoise" + ): + if self.batch_mode != "complete_episodes": + raise ValueError( + "ParameterNoise Exploration requires `batch_mode` to be " + "'complete_episodes'. Try setting `config.env_runners(" + "batch_mode='complete_episodes')`." + ) + if self.noisy: + raise ValueError( + "ParameterNoise Exploration and `noisy` network cannot be" + " used at the same time!" + ) + + # Validate that we use the corresponding `EpisodeReplayBuffer` when using + # episodes. + # TODO (sven, simon): Implement the multi-agent case for replay buffers. + from ray.rllib.utils.replay_buffers.episode_replay_buffer import ( + EpisodeReplayBuffer, + ) + + if ( + self.enable_env_runner_and_connector_v2 + and not isinstance(self.replay_buffer_config["type"], str) + and not issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer) + ): + raise ValueError( + "When using the new `EnvRunner API` the replay buffer must be of type " + "`EpisodeReplayBuffer`." + ) + elif not self.enable_env_runner_and_connector_v2 and ( + ( + isinstance(self.replay_buffer_config["type"], str) + and "Episode" in self.replay_buffer_config["type"] + ) + or issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer) + ): + raise ValueError( + "When using the old API stack the replay buffer must not be of type " + "`EpisodeReplayBuffer`! We suggest you use the following config to run " + "DQN on the old API stack: `config.training(replay_buffer_config={" + "'type': 'MultiAgentPrioritizedReplayBuffer', " + "'prioritized_replay_alpha': [alpha], " + "'prioritized_replay_beta': [beta], " + "'prioritized_replay_eps': [eps], " + "})`." + ) + + @override(AlgorithmConfig) + def get_rollout_fragment_length(self, worker_index: int = 0) -> int: + if self.rollout_fragment_length == "auto": + return ( + self.n_step[1] + if isinstance(self.n_step, (tuple, list)) + else self.n_step + ) + else: + return self.rollout_fragment_length + + @override(AlgorithmConfig) + def get_default_rl_module_spec(self) -> RLModuleSpecType: + from ray.rllib.algorithms.dqn.dqn_rainbow_catalog import DQNRainbowCatalog + + if self.framework_str == "torch": + from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_rl_module import ( + DQNRainbowTorchRLModule, + ) + + return RLModuleSpec( + module_class=DQNRainbowTorchRLModule, + catalog_class=DQNRainbowCatalog, + model_config=self.model_config, + ) + else: + raise ValueError( + f"The framework {self.framework_str} is not supported! " + "Use `config.framework('torch')` instead." + ) + + @property + @override(AlgorithmConfig) + def _model_config_auto_includes(self) -> Dict[str, Any]: + return super()._model_config_auto_includes | { + "double_q": self.double_q, + "dueling": self.dueling, + "epsilon": self.epsilon, + "noisy": self.noisy, + "num_atoms": self.num_atoms, + "std_init": self.sigma0, + "v_max": self.v_max, + "v_min": self.v_min, + } + + @override(AlgorithmConfig) + def get_default_learner_class(self) -> Union[Type["Learner"], str]: + if self.framework_str == "torch": + from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_learner import ( + DQNRainbowTorchLearner, + ) + + return DQNRainbowTorchLearner + else: + raise ValueError( + f"The framework {self.framework_str} is not supported! " + "Use `config.framework('torch')` instead." 
+ ) + + @override(AlgorithmConfig) + def build_learner_connector( + self, + input_observation_space, + input_action_space, + device=None, + ): + pipeline = super().build_learner_connector( + input_observation_space=input_observation_space, + input_action_space=input_action_space, + device=device, + ) + + # Prepend the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece (right + # after the corresponding "add-OBS-..." default piece). + pipeline.insert_after( + AddObservationsFromEpisodesToBatch, + AddNextObservationsFromEpisodesToTrainBatch(), + ) + + return pipeline + + +def calculate_rr_weights(config: AlgorithmConfig) -> List[float]: + """Calculate the round robin weights for the rollout and train steps""" + if not config.training_intensity: + return [1, 1] + + # Calculate the "native ratio" as: + # [train-batch-size] / [size of env-rolled-out sampled data] + # This is to set freshly rollout-collected data in relation to + # the data we pull from the replay buffer (which also contains old + # samples). + native_ratio = config.total_train_batch_size / ( + config.get_rollout_fragment_length() + * config.num_envs_per_env_runner + # Add one to workers because the local + # worker usually collects experiences as well, and we avoid division by zero. + * max(config.num_env_runners + 1, 1) + ) + + # Training intensity is specified in terms of + # (steps_replayed / steps_sampled), so adjust for the native ratio. + sample_and_train_weight = config.training_intensity / native_ratio + if sample_and_train_weight < 1: + return [int(np.round(1 / sample_and_train_weight)), 1] + else: + return [1, int(np.round(sample_and_train_weight))] + + +class DQN(Algorithm): + @classmethod + @override(Algorithm) + def get_default_config(cls) -> AlgorithmConfig: + return DQNConfig() + + @classmethod + @override(Algorithm) + def get_default_policy_class( + cls, config: AlgorithmConfig + ) -> Optional[Type[Policy]]: + if config["framework"] == "torch": + return DQNTorchPolicy + else: + return DQNTFPolicy + + @override(Algorithm) + def training_step(self) -> None: + """DQN training iteration function. + + Each training iteration, we: + - Sample (MultiAgentBatch) from workers. + - Store new samples in replay buffer. + - Sample training batch (MultiAgentBatch) from replay buffer. + - Learn on training batch. + - Update remote workers' new policy weights. + - Update target network every `target_network_update_freq` sample steps. + - Return all collected metrics for the iteration. + + Returns: + The results dict from executing the training iteration. + """ + # Old API stack (Policy, RolloutWorker, Connector). + if not self.config.enable_env_runner_and_connector_v2: + return self._training_step_old_api_stack() + + # New API stack (RLModule, Learner, EnvRunner, ConnectorV2). + return self._training_step_new_api_stack(with_noise_reset=True) + + def _training_step_new_api_stack(self, *, with_noise_reset): + # Alternate between storing and sampling and training. + store_weight, sample_and_train_weight = calculate_rr_weights(self.config) + + # Run multiple sampling + storing to buffer iterations. + for _ in range(store_weight): + with self.metrics.log_time((TIMERS, ENV_RUNNER_SAMPLING_TIMER)): + # Sample in parallel from workers. + episodes, env_runner_results = synchronous_parallel_sample( + worker_set=self.env_runner_group, + concat=True, + sample_timeout_s=self.config.sample_timeout_s, + _uses_new_env_runners=True, + _return_metrics=True, + ) + # Reduce EnvRunner metrics over the n EnvRunners. 
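+                # The merged per-EnvRunner dicts land under the ENV_RUNNER_RESULTS
+                # key, so the lifetime counters peeked further below
+                # (NUM_ENV_STEPS_SAMPLED_LIFETIME / NUM_AGENT_STEPS_SAMPLED_LIFETIME)
+                # aggregate over all EnvRunners before being compared against
+                # `num_steps_sampled_before_learning_starts`.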
+ self.metrics.merge_and_log_n_dicts( + env_runner_results, key=ENV_RUNNER_RESULTS + ) + + # Add the sampled experiences to the replay buffer. + with self.metrics.log_time((TIMERS, REPLAY_BUFFER_ADD_DATA_TIMER)): + self.local_replay_buffer.add(episodes) + + if self.config.count_steps_by == "agent_steps": + current_ts = sum( + self.metrics.peek( + (ENV_RUNNER_RESULTS, NUM_AGENT_STEPS_SAMPLED_LIFETIME), default={} + ).values() + ) + else: + current_ts = self.metrics.peek( + (ENV_RUNNER_RESULTS, NUM_ENV_STEPS_SAMPLED_LIFETIME), default=0 + ) + + # If enough experiences have been sampled start training. + if current_ts >= self.config.num_steps_sampled_before_learning_starts: + # Resample noise for noisy networks, if necessary. Note, this + # is proposed in the "Noisy Networks for Exploration" paper + # (https://arxiv.org/abs/1706.10295) in Algorithm 1. The noise + # gets sampled once for each training loop. + if with_noise_reset: + self.learner_group.foreach_learner( + func=lambda lrnr: lrnr._reset_noise(), + timeout_seconds=0.0, # fire-and-forget + ) + # Run multiple sample-from-buffer and update iterations. + for _ in range(sample_and_train_weight): + # Sample a list of episodes used for learning from the replay buffer. + with self.metrics.log_time((TIMERS, REPLAY_BUFFER_SAMPLE_TIMER)): + episodes = self.local_replay_buffer.sample( + num_items=self.config.total_train_batch_size, + n_step=self.config.n_step, + gamma=self.config.gamma, + beta=self.config.replay_buffer_config.get("beta"), + sample_episodes=True, + ) + + # Perform an update on the buffer-sampled train batch. + with self.metrics.log_time((TIMERS, LEARNER_UPDATE_TIMER)): + learner_results = self.learner_group.update_from_episodes( + episodes=episodes, + timesteps={ + NUM_ENV_STEPS_SAMPLED_LIFETIME: ( + self.metrics.peek( + (ENV_RUNNER_RESULTS, NUM_ENV_STEPS_SAMPLED_LIFETIME) + ) + ), + NUM_AGENT_STEPS_SAMPLED_LIFETIME: ( + self.metrics.peek( + ( + ENV_RUNNER_RESULTS, + NUM_AGENT_STEPS_SAMPLED_LIFETIME, + ) + ) + ), + }, + ) + # Isolate TD-errors from result dicts (we should not log these to + # disk or WandB, they might be very large). + td_errors = defaultdict(list) + for res in learner_results: + for module_id, module_results in res.items(): + if TD_ERROR_KEY in module_results: + td_errors[module_id].extend( + convert_to_numpy( + module_results.pop(TD_ERROR_KEY).peek() + ) + ) + td_errors = { + module_id: {TD_ERROR_KEY: np.concatenate(s, axis=0)} + for module_id, s in td_errors.items() + } + self.metrics.merge_and_log_n_dicts( + learner_results, key=LEARNER_RESULTS + ) + + # Update replay buffer priorities. + with self.metrics.log_time((TIMERS, REPLAY_BUFFER_UPDATE_PRIOS_TIMER)): + update_priorities_in_episode_replay_buffer( + replay_buffer=self.local_replay_buffer, + td_errors=td_errors, + ) + + # Update weights and global_vars - after learning on the local worker - + # on all remote workers. + with self.metrics.log_time((TIMERS, SYNCH_WORKER_WEIGHTS_TIMER)): + modules_to_update = set(learner_results[0].keys()) - {ALL_MODULES} + # NOTE: the new API stack does not use global vars. + self.env_runner_group.sync_weights( + from_worker_or_learner_group=self.learner_group, + policies=modules_to_update, + global_vars=None, + inference_only=True, + ) + + def _training_step_old_api_stack(self) -> ResultDict: + """Training step for the old API stack. + + More specifically this training step relies on `RolloutWorker`. 
+ """ + train_results = {} + + # We alternate between storing new samples and sampling and training + store_weight, sample_and_train_weight = calculate_rr_weights(self.config) + + for _ in range(store_weight): + # Sample (MultiAgentBatch) from workers. + with self._timers[SAMPLE_TIMER]: + new_sample_batch: SampleBatchType = synchronous_parallel_sample( + worker_set=self.env_runner_group, + concat=True, + sample_timeout_s=self.config.sample_timeout_s, + ) + + # Return early if all our workers failed. + if not new_sample_batch: + return {} + + # Update counters + self._counters[NUM_AGENT_STEPS_SAMPLED] += new_sample_batch.agent_steps() + self._counters[NUM_ENV_STEPS_SAMPLED] += new_sample_batch.env_steps() + + # Store new samples in replay buffer. + self.local_replay_buffer.add(new_sample_batch) + + global_vars = { + "timestep": self._counters[NUM_ENV_STEPS_SAMPLED], + } + + # Update target network every `target_network_update_freq` sample steps. + cur_ts = self._counters[ + ( + NUM_AGENT_STEPS_SAMPLED + if self.config.count_steps_by == "agent_steps" + else NUM_ENV_STEPS_SAMPLED + ) + ] + + if cur_ts > self.config.num_steps_sampled_before_learning_starts: + for _ in range(sample_and_train_weight): + # Sample training batch (MultiAgentBatch) from replay buffer. + train_batch = sample_min_n_steps_from_buffer( + self.local_replay_buffer, + self.config.total_train_batch_size, + count_by_agent_steps=self.config.count_steps_by == "agent_steps", + ) + + # Postprocess batch before we learn on it + post_fn = self.config.get("before_learn_on_batch") or (lambda b, *a: b) + train_batch = post_fn(train_batch, self.env_runner_group, self.config) + + # Learn on training batch. + # Use simple optimizer (only for multi-agent or tf-eager; all other + # cases should use the multi-GPU optimizer, even if only using 1 GPU) + if self.config.get("simple_optimizer") is True: + train_results = train_one_step(self, train_batch) + else: + train_results = multi_gpu_train_one_step(self, train_batch) + + # Update replay buffer priorities. + update_priorities_in_replay_buffer( + self.local_replay_buffer, + self.config, + train_batch, + train_results, + ) + + last_update = self._counters[LAST_TARGET_UPDATE_TS] + if cur_ts - last_update >= self.config.target_network_update_freq: + to_update = self.env_runner.get_policies_to_train() + self.env_runner.foreach_policy_to_train( + lambda p, pid, to_update=to_update: ( + pid in to_update and p.update_target() + ) + ) + self._counters[NUM_TARGET_UPDATES] += 1 + self._counters[LAST_TARGET_UPDATE_TS] = cur_ts + + # Update weights and global_vars - after learning on the local worker - + # on all remote workers. + with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: + self.env_runner_group.sync_weights(global_vars=global_vars) + + # Return all collected metrics for the iteration. 
+ return train_results diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..eeaceab105093c7d81075eb81ddc8576ed196ee6 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_learner.py @@ -0,0 +1,109 @@ +import abc +from typing import Any, Dict, Optional + +from ray.rllib.core.learner.learner import Learner +from ray.rllib.core.learner.utils import update_target_network +from ray.rllib.core.rl_module.apis.target_network_api import TargetNetworkAPI +from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec +from ray.rllib.core.rl_module.rl_module import RLModuleSpec +from ray.rllib.utils.annotations import ( + override, + OverrideToImplementCustomLogic_CallToSuperRecommended, +) +from ray.rllib.utils.metrics import ( + LAST_TARGET_UPDATE_TS, + NUM_ENV_STEPS_SAMPLED_LIFETIME, + NUM_TARGET_UPDATES, +) +from ray.rllib.utils.typing import ModuleID, ShouldModuleBeUpdatedFn + + +# Now, this is double defined: In `SACRLModule` and here. I would keep it here +# or push it into the `Learner` as these are recurring keys in RL. +ATOMS = "atoms" +QF_LOSS_KEY = "qf_loss" +QF_LOGITS = "qf_logits" +QF_MEAN_KEY = "qf_mean" +QF_MAX_KEY = "qf_max" +QF_MIN_KEY = "qf_min" +QF_NEXT_PREDS = "qf_next_preds" +QF_TARGET_NEXT_PREDS = "qf_target_next_preds" +QF_TARGET_NEXT_PROBS = "qf_target_next_probs" +QF_PREDS = "qf_preds" +QF_PROBS = "qf_probs" +TD_ERROR_MEAN_KEY = "td_error_mean" + + +class DQNRainbowLearner(Learner): + @OverrideToImplementCustomLogic_CallToSuperRecommended + @override(Learner) + def build(self) -> None: + super().build() + + # Make target networks. + self.module.foreach_module( + lambda mid, mod: ( + mod.make_target_networks() + if isinstance(mod, TargetNetworkAPI) + else None + ) + ) + + @override(Learner) + def add_module( + self, + *, + module_id: ModuleID, + module_spec: RLModuleSpec, + config_overrides: Optional[Dict] = None, + new_should_module_be_updated: Optional[ShouldModuleBeUpdatedFn] = None, + ) -> MultiRLModuleSpec: + marl_spec = super().add_module( + module_id=module_id, + module_spec=module_spec, + config_overrides=config_overrides, + new_should_module_be_updated=new_should_module_be_updated, + ) + # Create target networks for added Module, if applicable. + if isinstance(self.module[module_id].unwrapped(), TargetNetworkAPI): + self.module[module_id].unwrapped().make_target_networks() + return marl_spec + + @override(Learner) + def after_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None: + """Updates the target Q Networks.""" + super().after_gradient_based_update(timesteps=timesteps) + + timestep = timesteps.get(NUM_ENV_STEPS_SAMPLED_LIFETIME, 0) + + # TODO (sven): Maybe we should have a `after_gradient_based_update` + # method per module? + for module_id, module in self.module._rl_modules.items(): + config = self.config.get_config_for_module(module_id) + last_update_ts_key = (module_id, LAST_TARGET_UPDATE_TS) + if timestep - self.metrics.peek( + last_update_ts_key, default=0 + ) >= config.target_network_update_freq and isinstance( + module.unwrapped(), TargetNetworkAPI + ): + for ( + main_net, + target_net, + ) in module.unwrapped().get_target_network_pairs(): + update_target_network( + main_net=main_net, + target_net=target_net, + tau=config.tau, + ) + # Increase lifetime target network update counter by one. 
+ self.metrics.log_value((module_id, NUM_TARGET_UPDATES), 1, reduce="sum") + # Update the (single-value -> window=1) last updated timestep metric. + self.metrics.log_value(last_update_ts_key, timestep, window=1) + + @abc.abstractmethod + def _reset_noise(self) -> None: + """Resets the noise in the `Algorithm.training_step()` + + Note, this can be overridden by the user to reset the noise at different + points in the training loop. + """ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_noisy_net_configs.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_noisy_net_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..dad6dd65e82b3ed87179f2a6d9b700922082c220 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_noisy_net_configs.py @@ -0,0 +1,60 @@ +from dataclasses import dataclass + +from ray.rllib.core.models.base import Encoder +from ray.rllib.core.models.configs import _framework_implemented, _MLPConfig +from ray.rllib.utils.annotations import ExperimentalAPI, override + + +@ExperimentalAPI +@dataclass +class NoisyMLPConfig(_MLPConfig): + std_init: float = 0.1 + + @override(_MLPConfig) + def _validate(self, framework: str = "torch"): + """Makes sure that standard deviation is positive.""" + super()._validate(framework=framework) + + if self.std_init < 0.0: + raise ValueError( + f"`std_init` ({self.std_init}) of `NoisyMLPConfig must be " + "non-negative." + ) + + +@ExperimentalAPI +@dataclass +class NoisyMLPEncoderConfig(NoisyMLPConfig): + @_framework_implemented() + def build(self, framework: str = "torch") -> "Encoder": + self._validate(framework) + + if framework == "torch": + from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_noisy_net import ( + TorchNoisyMLPEncoder, + ) + + return TorchNoisyMLPEncoder(self) + else: + raise ValueError( + "`NoisyMLPEncoder` is not implemented for framework " f"{framework}. " + ) + + +@ExperimentalAPI +@dataclass +class NoisyMLPHeadConfig(NoisyMLPConfig): + @_framework_implemented() + def build(self, framework: str = "torch") -> "Encoder": + self._validate(framework) + + if framework == "torch": + from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_noisy_net import ( + TorchNoisyMLPHead, + ) + + return TorchNoisyMLPHead(self) + else: + raise ValueError( + "`NoisyMLPHead` is not implemented for framework " f"{framework}. 
" + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_tf_policy.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_tf_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..f3ef3118e493666f1087e922897121a1f3ad01af --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_tf_policy.py @@ -0,0 +1,500 @@ +"""TensorFlow policy class used for DQN""" + +from typing import Dict + +import gymnasium as gym +import numpy as np + +import ray +from ray.rllib.algorithms.dqn.distributional_q_tf_model import DistributionalQTFModel +from ray.rllib.evaluation.postprocessing import adjust_nstep +from ray.rllib.models import ModelCatalog +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.tf_action_dist import get_categorical_class_with_temperature +from ray.rllib.policy.policy import Policy +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.tf_mixins import LearningRateSchedule, TargetNetworkMixin +from ray.rllib.policy.tf_policy_template import build_tf_policy +from ray.rllib.utils.error import UnsupportedSpaceException +from ray.rllib.utils.exploration import ParameterNoise +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.numpy import convert_to_numpy +from ray.rllib.utils.tf_utils import ( + huber_loss, + l2_loss, + make_tf_callable, + minimize_and_clip, + reduce_mean_ignore_inf, +) +from ray.rllib.utils.typing import AlgorithmConfigDict, ModelGradients, TensorType + +tf1, tf, tfv = try_import_tf() + +# Importance sampling weights for prioritized replay +PRIO_WEIGHTS = "weights" +Q_SCOPE = "q_func" +Q_TARGET_SCOPE = "target_q_func" + + +class QLoss: + def __init__( + self, + q_t_selected: TensorType, + q_logits_t_selected: TensorType, + q_tp1_best: TensorType, + q_dist_tp1_best: TensorType, + importance_weights: TensorType, + rewards: TensorType, + done_mask: TensorType, + gamma: float = 0.99, + n_step: int = 1, + num_atoms: int = 1, + v_min: float = -10.0, + v_max: float = 10.0, + loss_fn=huber_loss, + ): + + if num_atoms > 1: + # Distributional Q-learning which corresponds to an entropy loss + + z = tf.range(num_atoms, dtype=tf.float32) + z = v_min + z * (v_max - v_min) / float(num_atoms - 1) + + # (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms) + r_tau = tf.expand_dims(rewards, -1) + gamma**n_step * tf.expand_dims( + 1.0 - done_mask, -1 + ) * tf.expand_dims(z, 0) + r_tau = tf.clip_by_value(r_tau, v_min, v_max) + b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1)) + lb = tf.floor(b) + ub = tf.math.ceil(b) + # indispensable judgement which is missed in most implementations + # when b happens to be an integer, lb == ub, so pr_j(s', a*) will + # be discarded because (ub-b) == (b-lb) == 0 + floor_equal_ceil = tf.cast(tf.less(ub - lb, 0.5), tf.float32) + + l_project = tf.one_hot( + tf.cast(lb, dtype=tf.int32), num_atoms + ) # (batch_size, num_atoms, num_atoms) + u_project = tf.one_hot( + tf.cast(ub, dtype=tf.int32), num_atoms + ) # (batch_size, num_atoms, num_atoms) + ml_delta = q_dist_tp1_best * (ub - b + floor_equal_ceil) + mu_delta = q_dist_tp1_best * (b - lb) + ml_delta = tf.reduce_sum(l_project * tf.expand_dims(ml_delta, -1), axis=1) + mu_delta = tf.reduce_sum(u_project * tf.expand_dims(mu_delta, -1), axis=1) + m = ml_delta + mu_delta + + # Rainbow paper claims that using this cross entropy loss for + # priority is robust and insensitive to `prioritized_replay_alpha` + self.td_error = 
tf.nn.softmax_cross_entropy_with_logits( + labels=m, logits=q_logits_t_selected + ) + self.loss = tf.reduce_mean( + self.td_error * tf.cast(importance_weights, tf.float32) + ) + self.stats = { + # TODO: better Q stats for dist dqn + "mean_td_error": tf.reduce_mean(self.td_error), + } + else: + q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best + + # compute RHS of bellman equation + q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked + + # compute the error (potentially clipped) + self.td_error = q_t_selected - tf.stop_gradient(q_t_selected_target) + self.loss = tf.reduce_mean( + tf.cast(importance_weights, tf.float32) * loss_fn(self.td_error) + ) + self.stats = { + "mean_q": tf.reduce_mean(q_t_selected), + "min_q": tf.reduce_min(q_t_selected), + "max_q": tf.reduce_max(q_t_selected), + "mean_td_error": tf.reduce_mean(self.td_error), + } + + +class ComputeTDErrorMixin: + """Assign the `compute_td_error` method to the DQNTFPolicy + + This allows us to prioritize on the worker side. + """ + + def __init__(self): + @make_tf_callable(self.get_session(), dynamic_shape=True) + def compute_td_error( + obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights + ): + # Do forward pass on loss to update td error attribute + build_q_losses( + self, + self.model, + None, + { + SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t), + SampleBatch.ACTIONS: tf.convert_to_tensor(act_t), + SampleBatch.REWARDS: tf.convert_to_tensor(rew_t), + SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1), + SampleBatch.TERMINATEDS: tf.convert_to_tensor(terminateds_mask), + PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights), + }, + ) + + return self.q_loss.td_error + + self.compute_td_error = compute_td_error + + +def build_q_model( + policy: Policy, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + config: AlgorithmConfigDict, +) -> ModelV2: + """Build q_model and target_model for DQN + + Args: + policy: The Policy, which will use the model for optimization. + obs_space (gym.spaces.Space): The policy's observation space. + action_space (gym.spaces.Space): The policy's action space. + config (AlgorithmConfigDict): + + Returns: + ModelV2: The Model for the Policy to use. + Note: The target q model will not be returned, just assigned to + `policy.target_model`. + """ + if not isinstance(action_space, gym.spaces.Discrete): + raise UnsupportedSpaceException( + "Action space {} is not supported for DQN.".format(action_space) + ) + + if config["hiddens"]: + # try to infer the last layer size, otherwise fall back to 256 + num_outputs = ([256] + list(config["model"]["fcnet_hiddens"]))[-1] + config["model"]["no_final_linear"] = True + else: + num_outputs = action_space.n + + q_model = ModelCatalog.get_model_v2( + obs_space=obs_space, + action_space=action_space, + num_outputs=num_outputs, + model_config=config["model"], + framework="tf", + model_interface=DistributionalQTFModel, + name=Q_SCOPE, + num_atoms=config["num_atoms"], + dueling=config["dueling"], + q_hiddens=config["hiddens"], + use_noisy=config["noisy"], + v_min=config["v_min"], + v_max=config["v_max"], + sigma0=config["sigma0"], + # TODO(sven): Move option to add LayerNorm after each Dense + # generically into ModelCatalog. 
+ add_layer_norm=isinstance(getattr(policy, "exploration", None), ParameterNoise) + or config["exploration_config"]["type"] == "ParameterNoise", + ) + + policy.target_model = ModelCatalog.get_model_v2( + obs_space=obs_space, + action_space=action_space, + num_outputs=num_outputs, + model_config=config["model"], + framework="tf", + model_interface=DistributionalQTFModel, + name=Q_TARGET_SCOPE, + num_atoms=config["num_atoms"], + dueling=config["dueling"], + q_hiddens=config["hiddens"], + use_noisy=config["noisy"], + v_min=config["v_min"], + v_max=config["v_max"], + sigma0=config["sigma0"], + # TODO(sven): Move option to add LayerNorm after each Dense + # generically into ModelCatalog. + add_layer_norm=isinstance(getattr(policy, "exploration", None), ParameterNoise) + or config["exploration_config"]["type"] == "ParameterNoise", + ) + + return q_model + + +def get_distribution_inputs_and_class( + policy: Policy, model: ModelV2, input_dict: SampleBatch, *, explore=True, **kwargs +): + q_vals = compute_q_values( + policy, model, input_dict, state_batches=None, explore=explore + ) + q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals + + policy.q_values = q_vals + + # Return a Torch TorchCategorical distribution where the temperature + # parameter is partially binded to the configured value. + temperature = policy.config["categorical_distribution_temperature"] + + return ( + policy.q_values, + get_categorical_class_with_temperature(temperature), + [], + ) # state-out + + +def build_q_losses(policy: Policy, model, _, train_batch: SampleBatch) -> TensorType: + """Constructs the loss for DQNTFPolicy. + + Args: + policy: The Policy to calculate the loss for. + model (ModelV2): The Model to calculate the loss for. + train_batch: The training data. + + Returns: + TensorType: A single loss tensor. + """ + config = policy.config + # q network evaluation + q_t, q_logits_t, q_dist_t, _ = compute_q_values( + policy, + model, + SampleBatch({"obs": train_batch[SampleBatch.CUR_OBS]}), + state_batches=None, + explore=False, + ) + + # target q network evalution + q_tp1, q_logits_tp1, q_dist_tp1, _ = compute_q_values( + policy, + policy.target_model, + SampleBatch({"obs": train_batch[SampleBatch.NEXT_OBS]}), + state_batches=None, + explore=False, + ) + if not hasattr(policy, "target_q_func_vars"): + policy.target_q_func_vars = policy.target_model.variables() + + # q scores for actions which we know were selected in the given state. 
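+    # Illustrative note: with `action_space.n == 3` and actions [2, 0], the one-hot
+    # selection below is [[0, 0, 1], [1, 0, 0]]; multiplying with `q_t` of shape
+    # (B, 3) and summing over axis 1 picks out Q(s, a) per row -- i.e. a gather
+    # along the action axis.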
+ one_hot_selection = tf.one_hot( + tf.cast(train_batch[SampleBatch.ACTIONS], tf.int32), policy.action_space.n + ) + q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1) + q_logits_t_selected = tf.reduce_sum( + q_logits_t * tf.expand_dims(one_hot_selection, -1), 1 + ) + + # compute estimate of best possible value starting from state at t + 1 + if config["double_q"]: + ( + q_tp1_using_online_net, + q_logits_tp1_using_online_net, + q_dist_tp1_using_online_net, + _, + ) = compute_q_values( + policy, + model, + SampleBatch({"obs": train_batch[SampleBatch.NEXT_OBS]}), + state_batches=None, + explore=False, + ) + q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1) + q_tp1_best_one_hot_selection = tf.one_hot( + q_tp1_best_using_online_net, policy.action_space.n + ) + q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1) + q_dist_tp1_best = tf.reduce_sum( + q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1 + ) + else: + q_tp1_best_one_hot_selection = tf.one_hot( + tf.argmax(q_tp1, 1), policy.action_space.n + ) + q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1) + q_dist_tp1_best = tf.reduce_sum( + q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1 + ) + + loss_fn = huber_loss if policy.config["td_error_loss_fn"] == "huber" else l2_loss + + policy.q_loss = QLoss( + q_t_selected, + q_logits_t_selected, + q_tp1_best, + q_dist_tp1_best, + train_batch[PRIO_WEIGHTS], + tf.cast(train_batch[SampleBatch.REWARDS], tf.float32), + tf.cast(train_batch[SampleBatch.TERMINATEDS], tf.float32), + config["gamma"], + config["n_step"], + config["num_atoms"], + config["v_min"], + config["v_max"], + loss_fn, + ) + + return policy.q_loss.loss + + +def adam_optimizer( + policy: Policy, config: AlgorithmConfigDict +) -> "tf.keras.optimizers.Optimizer": + if policy.config["framework"] == "tf2": + return tf.keras.optimizers.Adam( + learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"] + ) + else: + return tf1.train.AdamOptimizer( + learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"] + ) + + +def clip_gradients( + policy: Policy, optimizer: "tf.keras.optimizers.Optimizer", loss: TensorType +) -> ModelGradients: + if not hasattr(policy, "q_func_vars"): + policy.q_func_vars = policy.model.variables() + + return minimize_and_clip( + optimizer, + loss, + var_list=policy.q_func_vars, + clip_val=policy.config["grad_clip"], + ) + + +def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]: + return dict( + { + "cur_lr": tf.cast(policy.cur_lr, tf.float64), + }, + **policy.q_loss.stats + ) + + +def setup_mid_mixins(policy: Policy, obs_space, action_space, config) -> None: + LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) + ComputeTDErrorMixin.__init__(policy) + + +def setup_late_mixins( + policy: Policy, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + config: AlgorithmConfigDict, +) -> None: + TargetNetworkMixin.__init__(policy) + + +def compute_q_values( + policy: Policy, + model: ModelV2, + input_batch: SampleBatch, + state_batches=None, + seq_lens=None, + explore=None, + is_training: bool = False, +): + + config = policy.config + + model_out, state = model(input_batch, state_batches or [], seq_lens) + + if config["num_atoms"] > 1: + ( + action_scores, + z, + support_logits_per_action, + logits, + dist, + ) = model.get_q_value_distributions(model_out) + else: + (action_scores, logits, dist) = model.get_q_value_distributions(model_out) + + if config["dueling"]: + state_score = 
model.get_state_value(model_out) + if config["num_atoms"] > 1: + support_logits_per_action_mean = tf.reduce_mean( + support_logits_per_action, 1 + ) + support_logits_per_action_centered = ( + support_logits_per_action + - tf.expand_dims(support_logits_per_action_mean, 1) + ) + support_logits_per_action = ( + tf.expand_dims(state_score, 1) + support_logits_per_action_centered + ) + support_prob_per_action = tf.nn.softmax(logits=support_logits_per_action) + value = tf.reduce_sum(input_tensor=z * support_prob_per_action, axis=-1) + logits = support_logits_per_action + dist = support_prob_per_action + else: + action_scores_mean = reduce_mean_ignore_inf(action_scores, 1) + action_scores_centered = action_scores - tf.expand_dims( + action_scores_mean, 1 + ) + value = state_score + action_scores_centered + else: + value = action_scores + + return value, logits, dist, state + + +def postprocess_nstep_and_prio( + policy: Policy, batch: SampleBatch, other_agent=None, episode=None +) -> SampleBatch: + # N-step Q adjustments. + if policy.config["n_step"] > 1: + adjust_nstep(policy.config["n_step"], policy.config["gamma"], batch) + + # Create dummy prio-weights (1.0) in case we don't have any in + # the batch. + if PRIO_WEIGHTS not in batch: + batch[PRIO_WEIGHTS] = np.ones_like(batch[SampleBatch.REWARDS]) + + # Prioritize on the worker side. + if batch.count > 0 and policy.config["replay_buffer_config"].get( + "worker_side_prioritization", False + ): + td_errors = policy.compute_td_error( + batch[SampleBatch.OBS], + batch[SampleBatch.ACTIONS], + batch[SampleBatch.REWARDS], + batch[SampleBatch.NEXT_OBS], + batch[SampleBatch.TERMINATEDS], + batch[PRIO_WEIGHTS], + ) + # Retain compatibility with old-style Replay args + epsilon = policy.config.get("replay_buffer_config", {}).get( + "prioritized_replay_eps" + ) or policy.config.get("prioritized_replay_eps") + if epsilon is None: + raise ValueError("prioritized_replay_eps not defined in config.") + + new_priorities = np.abs(convert_to_numpy(td_errors)) + epsilon + batch[PRIO_WEIGHTS] = new_priorities + + return batch + + +DQNTFPolicy = build_tf_policy( + name="DQNTFPolicy", + get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DQNConfig(), + make_model=build_q_model, + action_distribution_fn=get_distribution_inputs_and_class, + loss_fn=build_q_losses, + stats_fn=build_q_stats, + postprocess_fn=postprocess_nstep_and_prio, + optimizer_fn=adam_optimizer, + compute_gradients_fn=clip_gradients, + extra_action_out_fn=lambda policy: {"q_values": policy.q_values}, + extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error}, + before_loss_init=setup_mid_mixins, + after_init=setup_late_mixins, + mixins=[ + TargetNetworkMixin, + ComputeTDErrorMixin, + LearningRateSchedule, + ], +) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_torch_model.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_torch_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5b93e271bc4be5698cf055b53a79393aa57d96ad --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_torch_model.py @@ -0,0 +1,173 @@ +"""PyTorch model for DQN""" + +from typing import Sequence +import gymnasium as gym +from ray.rllib.models.torch.misc import SlimFC +from ray.rllib.models.torch.modules.noisy_layer import NoisyLayer +from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.typing import ModelConfigDict + 
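+# A hedged construction sketch (it mirrors `build_q_model` in the TF policy; the
+# exact torch-policy wiring is not part of this file, so treat the kwargs as
+# illustrative):
+#
+#   from ray.rllib.models import ModelCatalog
+#
+#   model = ModelCatalog.get_model_v2(
+#       obs_space=obs_space,
+#       action_space=action_space,
+#       num_outputs=256,
+#       model_config=config["model"],
+#       framework="torch",
+#       model_interface=DQNTorchModel,
+#       name="q_func",
+#       dueling=config["dueling"],
+#       num_atoms=config["num_atoms"],
+#       v_min=config["v_min"],
+#       v_max=config["v_max"],
+#   )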
+torch, nn = try_import_torch() + + +class DQNTorchModel(TorchModelV2, nn.Module): + """Extension of standard TorchModelV2 to provide dueling-Q functionality.""" + + def __init__( + self, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: int, + model_config: ModelConfigDict, + name: str, + *, + q_hiddens: Sequence[int] = (256,), + dueling: bool = False, + dueling_activation: str = "relu", + num_atoms: int = 1, + use_noisy: bool = False, + v_min: float = -10.0, + v_max: float = 10.0, + sigma0: float = 0.5, + # TODO(sven): Move `add_layer_norm` into ModelCatalog as + # generic option, then error if we use ParameterNoise as + # Exploration type and do not have any LayerNorm layers in + # the net. + add_layer_norm: bool = False + ): + """Initialize variables of this model. + + Extra model kwargs: + q_hiddens (Sequence[int]): List of layer-sizes after(!) the + Advantages(A)/Value(V)-split. Hence, each of the A- and V- + branches will have this structure of Dense layers. To define + the NN before this A/V-split, use - as always - + config["model"]["fcnet_hiddens"]. + dueling: Whether to build the advantage(A)/value(V) heads + for DDQN. If True, Q-values are calculated as: + Q = (A - mean[A]) + V. If False, raw NN output is interpreted + as Q-values. + dueling_activation: The activation to use for all dueling + layers (A- and V-branch). One of "relu", "tanh", "linear". + num_atoms: If >1, enables distributional DQN. + use_noisy: Use noisy layers. + v_min: Min value support for distributional DQN. + v_max: Max value support for distributional DQN. + sigma0 (float): Initial value of noisy layers. + add_layer_norm: Enable layer norm (for param noise). + """ + nn.Module.__init__(self) + super(DQNTorchModel, self).__init__( + obs_space, action_space, num_outputs, model_config, name + ) + + self.dueling = dueling + self.num_atoms = num_atoms + self.v_min = v_min + self.v_max = v_max + self.sigma0 = sigma0 + ins = num_outputs + + advantage_module = nn.Sequential() + value_module = nn.Sequential() + + # Dueling case: Build the shared (advantages and value) fc-network. + for i, n in enumerate(q_hiddens): + if use_noisy: + advantage_module.add_module( + "dueling_A_{}".format(i), + NoisyLayer( + ins, n, sigma0=self.sigma0, activation=dueling_activation + ), + ) + value_module.add_module( + "dueling_V_{}".format(i), + NoisyLayer( + ins, n, sigma0=self.sigma0, activation=dueling_activation + ), + ) + else: + advantage_module.add_module( + "dueling_A_{}".format(i), + SlimFC(ins, n, activation_fn=dueling_activation), + ) + value_module.add_module( + "dueling_V_{}".format(i), + SlimFC(ins, n, activation_fn=dueling_activation), + ) + # Add LayerNorm after each Dense. + if add_layer_norm: + advantage_module.add_module( + "LayerNorm_A_{}".format(i), nn.LayerNorm(n) + ) + value_module.add_module("LayerNorm_V_{}".format(i), nn.LayerNorm(n)) + ins = n + + # Actual Advantages layer (nodes=num-actions). + if use_noisy: + advantage_module.add_module( + "A", + NoisyLayer( + ins, self.action_space.n * self.num_atoms, sigma0, activation=None + ), + ) + elif q_hiddens: + advantage_module.add_module( + "A", SlimFC(ins, action_space.n * self.num_atoms, activation_fn=None) + ) + + self.advantage_module = advantage_module + + # Value layer (nodes=1). 
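+        # Illustrative note: the A- and V-branches built here are combined downstream
+        # as Q = V + (A - mean(A)), per the class docstring. E.g. with V = 1.0 and
+        # A = [2.0, 0.0], mean(A) = 1.0, so Q = [2.0, 0.0]; subtracting the mean keeps
+        # the two heads identifiable.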
+ if self.dueling: + if use_noisy: + value_module.add_module( + "V", NoisyLayer(ins, self.num_atoms, sigma0, activation=None) + ) + elif q_hiddens: + value_module.add_module( + "V", SlimFC(ins, self.num_atoms, activation_fn=None) + ) + self.value_module = value_module + + def get_q_value_distributions(self, model_out): + """Returns distributional values for Q(s, a) given a state embedding. + + Override this in your custom model to customize the Q output head. + + Args: + model_out: Embedding from the model layers. + + Returns: + (action_scores, logits, dist) if num_atoms == 1, otherwise + (action_scores, z, support_logits_per_action, logits, dist) + """ + action_scores = self.advantage_module(model_out) + + if self.num_atoms > 1: + # Distributional Q-learning uses a discrete support z + # to represent the action value distribution + z = torch.arange(0.0, self.num_atoms, dtype=torch.float32).to( + action_scores.device + ) + z = self.v_min + z * (self.v_max - self.v_min) / float(self.num_atoms - 1) + + support_logits_per_action = torch.reshape( + action_scores, shape=(-1, self.action_space.n, self.num_atoms) + ) + support_prob_per_action = nn.functional.softmax( + support_logits_per_action, dim=-1 + ) + action_scores = torch.sum(z * support_prob_per_action, dim=-1) + logits = support_logits_per_action + probs = support_prob_per_action + return action_scores, z, support_logits_per_action, logits, probs + else: + logits = torch.unsqueeze(torch.ones_like(action_scores), -1) + return action_scores, logits, logits + + def get_state_value(self, model_out): + """Returns the state value prediction for the given state embedding.""" + + return self.value_module(model_out) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9f4578aad2023340d3a347baa72e03f28914609 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_learner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_learner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a911c18e8e2d8995f632cadebb04182cd88fb54 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_learner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_rl_module.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_rl_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a54eeba00c154ff80969e94a31f21b54863cf61 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_rl_module.cpython-310.pyc differ diff --git 
a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..b38e223498ea98c74dd376469cc77a388fd24431 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_learner.py @@ -0,0 +1,264 @@ +from typing import Dict + +from ray.rllib.algorithms.dqn.dqn import DQNConfig +from ray.rllib.algorithms.dqn.dqn_rainbow_learner import ( + ATOMS, + DQNRainbowLearner, + QF_LOSS_KEY, + QF_LOGITS, + QF_MEAN_KEY, + QF_MAX_KEY, + QF_MIN_KEY, + QF_NEXT_PREDS, + QF_TARGET_NEXT_PREDS, + QF_TARGET_NEXT_PROBS, + QF_PREDS, + QF_PROBS, + TD_ERROR_MEAN_KEY, +) +from ray.rllib.core.columns import Columns +from ray.rllib.core.learner.torch.torch_learner import TorchLearner +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.metrics import TD_ERROR_KEY +from ray.rllib.utils.typing import ModuleID, TensorType + + +torch, nn = try_import_torch() + + +class DQNRainbowTorchLearner(DQNRainbowLearner, TorchLearner): + """Implements `torch`-specific DQN Rainbow loss logic on top of `DQNRainbowLearner` + + This ' Learner' class implements the loss in its + `self.compute_loss_for_module()` method. + """ + + @override(TorchLearner) + def compute_loss_for_module( + self, + *, + module_id: ModuleID, + config: DQNConfig, + batch: Dict, + fwd_out: Dict[str, TensorType] + ) -> TensorType: + + q_curr = fwd_out[QF_PREDS] + q_target_next = fwd_out[QF_TARGET_NEXT_PREDS] + + # Get the Q-values for the selected actions in the rollout. + # TODO (simon, sven): Check, if we can use `gather` with a complex action + # space - we might need the one_hot_selection. Also test performance. + q_selected = torch.nan_to_num( + torch.gather( + q_curr, + dim=1, + index=batch[Columns.ACTIONS].view(-1, 1).expand(-1, 1).long(), + ), + neginf=0.0, + ).squeeze() + + # Use double Q learning. + if config.double_q: + # Then we evaluate the target Q-function at the best action (greedy action) + # over the online Q-function. + # Mark the best online Q-value of the next state. + q_next_best_idx = ( + torch.argmax(fwd_out[QF_NEXT_PREDS], dim=1).unsqueeze(dim=-1).long() + ) + # Get the Q-value of the target network at maximum of the online network + # (bootstrap action). + q_next_best = torch.nan_to_num( + torch.gather(q_target_next, dim=1, index=q_next_best_idx), + neginf=0.0, + ).squeeze() + else: + # Mark the maximum Q-value(s). + q_next_best_idx = ( + torch.argmax(q_target_next, dim=1).unsqueeze(dim=-1).long() + ) + # Get the maximum Q-value(s). + q_next_best = torch.nan_to_num( + torch.gather(q_target_next, dim=1, index=q_next_best_idx), + neginf=0.0, + ).squeeze() + + # If we learn a Q-distribution. + if config.num_atoms > 1: + # Extract the Q-logits evaluated at the selected actions. + # (Note, `torch.gather` should be faster than multiplication + # with a one-hot tensor.) + # (32, 2, 10) -> (32, 10) + q_logits_selected = torch.gather( + fwd_out[QF_LOGITS], + dim=1, + # Note, the Q-logits are of shape (B, action_space.n, num_atoms) + # while the actions have shape (B, 1). We reshape actions to + # (B, 1, num_atoms). + index=batch[Columns.ACTIONS] + .view(-1, 1, 1) + .expand(-1, 1, config.num_atoms) + .long(), + ).squeeze(dim=1) + # Get the probabilies for the maximum Q-value(s). 
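+            # Shape note: `QF_TARGET_NEXT_PROBS` is (B, num_actions, num_atoms);
+            # expanding `q_next_best_idx` to (B, 1, num_atoms) and gathering along
+            # dim 1 leaves (B, 1, num_atoms), squeezed to (B, num_atoms) below.
+            # A small worked example of the projection that follows (assuming
+            # v_min=-10, v_max=10, num_atoms=5, i.e. z = [-10, -5, 0, 5, 10]): a
+            # target return of 3.0 gives b = 2.6, so its probability mass is split
+            # 0.4 onto atom index 2 (value 0) and 0.6 onto atom index 3 (value 5).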
+ q_probs_next_best = torch.gather( + fwd_out[QF_TARGET_NEXT_PROBS], + dim=1, + # Change the view and then expand to get to the dimensions + # of the probabilities (dims 0 and 2, 1 should be reduced + # from 2 -> 1). + index=q_next_best_idx.view(-1, 1, 1).expand(-1, 1, config.num_atoms), + ).squeeze(dim=1) + + # For distributional Q-learning we use an entropy loss. + + # Extract the support grid for the Q distribution. + z = fwd_out[ATOMS] + # TODO (simon): Enable computing on GPU. + # (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)s + r_tau = torch.clamp( + batch[Columns.REWARDS].unsqueeze(dim=-1) + + ( + config.gamma ** batch["n_step"] + * (1.0 - batch[Columns.TERMINATEDS].float()) + ).unsqueeze(dim=-1) + * z, + config.v_min, + config.v_max, + ).squeeze(dim=1) + # (32, 10) + b = (r_tau - config.v_min) / ( + (config.v_max - config.v_min) / float(config.num_atoms - 1.0) + ) + lower_bound = torch.floor(b) + upper_bound = torch.ceil(b) + + floor_equal_ceil = ((upper_bound - lower_bound) < 0.5).float() + + # (B, num_atoms, num_atoms). + lower_projection = nn.functional.one_hot( + lower_bound.long(), config.num_atoms + ) + upper_projection = nn.functional.one_hot( + upper_bound.long(), config.num_atoms + ) + # (32, 10) + ml_delta = q_probs_next_best * (upper_bound - b + floor_equal_ceil) + mu_delta = q_probs_next_best * (b - lower_bound) + # (32, 10) + ml_delta = torch.sum(lower_projection * ml_delta.unsqueeze(dim=-1), dim=1) + mu_delta = torch.sum(upper_projection * mu_delta.unsqueeze(dim=-1), dim=1) + # We do not want to propagate through the distributional targets. + # (32, 10) + m = (ml_delta + mu_delta).detach() + + # The Rainbow paper claims to use the KL-divergence loss. This is identical + # to using the cross-entropy (differs only by entropy which is constant) + # when optimizing by the gradient (the gradient is identical). + td_error = nn.CrossEntropyLoss(reduction="none")(q_logits_selected, m) + # Compute the weighted loss (importance sampling weights). + total_loss = torch.mean(batch["weights"] * td_error) + else: + # Masked all Q-values with terminated next states in the targets. + q_next_best_masked = ( + 1.0 - batch[Columns.TERMINATEDS].float() + ) * q_next_best + + # Compute the RHS of the Bellman equation. + # Detach this node from the computation graph as we do not want to + # backpropagate through the target network when optimizing the Q loss. + q_selected_target = ( + batch[Columns.REWARDS] + + (config.gamma ** batch["n_step"]) * q_next_best_masked + ).detach() + + # Choose the requested loss function. Note, in case of the Huber loss + # we fall back to the default of `delta=1.0`. + loss_fn = nn.HuberLoss if config.td_error_loss_fn == "huber" else nn.MSELoss + # Compute the TD error. + td_error = torch.abs(q_selected - q_selected_target) + # Compute the weighted loss (importance sampling weights). + total_loss = torch.mean( + batch["weights"] + * loss_fn(reduction="none")(q_selected, q_selected_target) + ) + + # Log the TD-error with reduce=None, such that - in case we have n parallel + # Learners - we will re-concatenate the produced TD-error tensors to yield + # a 1:1 representation of the original batch. + self.metrics.log_value( + key=(module_id, TD_ERROR_KEY), + value=td_error, + reduce=None, + clear_on_reduce=True, + ) + # Log other important loss stats (reduce=mean (default), but with window=1 + # in order to keep them history free). 
+ self.metrics.log_dict( + { + QF_LOSS_KEY: total_loss, + QF_MEAN_KEY: torch.mean(q_selected), + QF_MAX_KEY: torch.max(q_selected), + QF_MIN_KEY: torch.min(q_selected), + TD_ERROR_MEAN_KEY: torch.mean(td_error), + }, + key=module_id, + window=1, # <- single items (should not be mean/ema-reduced over time). + ) + # If we learn a Q-value distribution store the support and average + # probabilities. + if config.num_atoms > 1: + # Log important loss stats. + self.metrics.log_dict( + { + ATOMS: z, + # The absolute difference in expectation between the actions + # should (at least mildly) rise. + "expectations_abs_diff": torch.mean( + torch.abs( + torch.diff( + torch.sum(fwd_out[QF_PROBS].mean(dim=0) * z, dim=1) + ).mean(dim=0) + ) + ), + # The total variation distance should measure the distance between + # return distributions of different actions. This should (at least + # mildly) increase during training when the agent differentiates + # more between actions. + "dist_total_variation_dist": torch.diff( + fwd_out[QF_PROBS].mean(dim=0), dim=0 + ) + .abs() + .sum() + * 0.5, + # The maximum distance between the action distributions. This metric + # should increase over the course of training. + "dist_max_abs_distance": torch.max( + torch.diff(fwd_out[QF_PROBS].mean(dim=0), dim=0).abs() + ), + # Mean shannon entropy of action distributions. This should decrease + # over the course of training. + "action_dist_mean_entropy": torch.mean( + ( + fwd_out[QF_PROBS].mean(dim=0) + * torch.log(fwd_out[QF_PROBS].mean(dim=0)) + ).sum(dim=1), + dim=0, + ), + }, + key=module_id, + window=1, # <- single items (should not be mean/ema-reduced over time). + ) + + return total_loss + + def _reset_noise(self) -> None: + # Reset the noise for all noisy modules, if necessary. + self.module.foreach_module( + lambda mid, module: ( + module._reset_noise(target=True) + if hasattr(module, "_reset_noise") + else None + ) + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_noisy_net.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_noisy_net.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd8492e6eb9a7877d08e84af2016d594331fae1 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_noisy_net.py @@ -0,0 +1,276 @@ +from typing import Callable, Dict, List, Optional, Union + +from ray.rllib.algorithms.dqn.dqn_rainbow_noisy_net_configs import ( + NoisyMLPEncoderConfig, + NoisyMLPHeadConfig, +) +from ray.rllib.algorithms.dqn.torch.torch_noisy_linear import NoisyLinear +from ray.rllib.core.columns import Columns +from ray.rllib.core.models.base import Encoder, ENCODER_OUT, Model +from ray.rllib.core.models.torch.base import TorchModel +from ray.rllib.models.utils import get_activation_fn, get_initializer_fn +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch + +torch, nn = try_import_torch() + + +class TorchNoisyMLPEncoder(TorchModel, Encoder): + def __init__(self, config: NoisyMLPEncoderConfig) -> None: + TorchModel.__init__(self, config) + Encoder.__init__(self, config) + + # Create the noisy network. 
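+        # Note on the scheme (hedged; the exact `NoisyLinear` parametrization lives in
+        # `torch_noisy_linear.py`): each noisy layer keeps learnable mean and sigma
+        # parameters and perturbs its weights as w = w_mu + w_sigma * eps, with eps
+        # resampled via `reset_noise()`; `std_init` sets the initial sigma scale
+        # (see "Noisy Networks for Exploration", https://arxiv.org/abs/1706.10295).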
+ self.net = TorchNoisyMLP( + input_dim=config.input_dims[0], + hidden_layer_dims=config.hidden_layer_dims, + hidden_layer_activation=config.hidden_layer_activation, + hidden_layer_use_layernorm=config.hidden_layer_use_layernorm, + hidden_layer_use_bias=config.hidden_layer_use_bias, + hidden_layer_weights_initializer=config.hidden_layer_weights_initializer, + hidden_layer_weights_initializer_config=( + config.hidden_layer_weights_initializer_config + ), + hidden_layer_bias_initializer=config.hidden_layer_bias_initializer, + hidden_layer_bias_initializer_config=( + config.hidden_layer_bias_initializer_config + ), + output_dim=config.output_layer_dim, + output_activation=config.output_layer_activation, + output_use_bias=config.output_layer_use_bias, + output_weights_initializer=config.output_layer_weights_initializer, + output_weights_initializer_config=( + config.output_layer_weights_initializer_config + ), + output_bias_initializer=config.output_layer_bias_initializer, + output_bias_initializer_config=config.output_layer_bias_initializer_config, + # Note, this is the only additional parameter in regard to a regular MLP. + std_init=config.std_init, + ) + + @override(Model) + def _forward(self, inputs: dict, **kwargs) -> dict: + return {ENCODER_OUT: self.net(inputs[Columns.OBS])} + + def _reset_noise(self): + # Reset the noise in the complete network. + self.net._reset_noise() + + +class TorchNoisyMLPHead(TorchModel): + def __init__(self, config: NoisyMLPHeadConfig) -> None: + super().__init__(config) + + self.net = TorchNoisyMLP( + input_dim=config.input_dims[0], + hidden_layer_dims=config.hidden_layer_dims, + hidden_layer_activation=config.hidden_layer_activation, + hidden_layer_use_layernorm=config.hidden_layer_use_layernorm, + hidden_layer_use_bias=config.hidden_layer_use_bias, + hidden_layer_weights_initializer=config.hidden_layer_weights_initializer, + hidden_layer_weights_initializer_config=( + config.hidden_layer_weights_initializer_config + ), + hidden_layer_bias_initializer=config.hidden_layer_bias_initializer, + hidden_layer_bias_initializer_config=( + config.hidden_layer_bias_initializer_config + ), + output_dim=config.output_layer_dim, + output_activation=config.output_layer_activation, + output_use_bias=config.output_layer_use_bias, + output_weights_initializer=config.output_layer_weights_initializer, + output_weights_initializer_config=( + config.output_layer_weights_initializer_config + ), + output_bias_initializer=config.output_layer_bias_initializer, + output_bias_initializer_config=config.output_layer_bias_initializer_config, + # Note, this is the only additional parameter in regard to a regular MLP. + std_init=config.std_init, + ) + + @override(Model) + def _forward(self, inputs: torch.Tensor, **kwargs) -> torch.Tensor: + return self.net(inputs) + + def _reset_noise(self) -> None: + # Reset the noise in the complete network. + self.net._reset_noise() + + +class TorchNoisyMLP(nn.Module): + """A multi-layer perceptron with N dense layers. + + All layers (except for an optional additional extra output layer) share the same + activation function, bias setup (use bias or not), and LayerNorm setup + (use layer normalization or not). + + If `output_dim` (int) is not None, an additional, extra output dense layer is added, + which might have its own activation function (e.g. "linear"). However, the output + layer does NOT use layer normalization. 
+ """ + + def __init__( + self, + *, + input_dim: int, + hidden_layer_dims: List[int], + hidden_layer_activation: Union[str, Callable] = "relu", + hidden_layer_use_bias: bool = True, + hidden_layer_use_layernorm: bool = False, + hidden_layer_weights_initializer: Optional[Union[str, Callable]] = None, + hidden_layer_weights_initializer_config: Optional[Union[str, Callable]] = None, + hidden_layer_bias_initializer: Optional[Union[str, Callable]] = None, + hidden_layer_bias_initializer_config: Optional[Dict] = None, + output_dim: Optional[int] = None, + output_use_bias: bool = True, + output_activation: Union[str, Callable] = "linear", + output_weights_initializer: Optional[Union[str, Callable]] = None, + output_weights_initializer_config: Optional[Dict] = None, + output_bias_initializer: Optional[Union[str, Callable]] = None, + output_bias_initializer_config: Optional[Dict] = None, + std_init: Optional[float] = 0.1, + ): + """Initialize a TorchMLP object. + + Args: + input_dim: The input dimension of the network. Must not be None. + hidden_layer_dims: The sizes of the hidden layers. If an empty list, only a + single layer will be built of size `output_dim`. + hidden_layer_use_layernorm: Whether to insert a LayerNormalization + functionality in between each hidden layer's output and its activation. + hidden_layer_use_bias: Whether to use bias on all dense layers (excluding + the possible separate output layer). + hidden_layer_activation: The activation function to use after each layer + (except for the output). Either a torch.nn.[activation fn] callable or + the name thereof, or an RLlib recognized activation name, + e.g. "ReLU", "relu", "tanh", "SiLU", or "linear". + hidden_layer_weights_initializer: The initializer function or class to use + forweights initialization in the hidden layers. If `None` the default + initializer of the respective dense layer is used. Note, only the + in-place initializers, i.e. ending with an underscore "_" are allowed. + hidden_layer_weights_initializer_config: Configuration to pass into the + initializer defined in `hidden_layer_weights_initializer`. + hidden_layer_bias_initializer: The initializer function or class to use for + bias initialization in the hidden layers. If `None` the default + initializer of the respective dense layer is used. Note, only the + in-place initializers, i.e. ending with an underscore "_" are allowed. + hidden_layer_bias_initializer_config: Configuration to pass into the + initializer defined in `hidden_layer_bias_initializer`. + output_dim: The output dimension of the network. If None, no specific output + layer will be added and the last layer in the stack will have + size=`hidden_layer_dims[-1]`. + output_use_bias: Whether to use bias on the separate output layer, + if any. + output_activation: The activation function to use for the output layer + (if any). Either a torch.nn.[activation fn] callable or + the name thereof, or an RLlib recognized activation name, + e.g. "ReLU", "relu", "tanh", "SiLU", or "linear". + output_layer_weights_initializer: The initializer function or class to use + for weights initialization in the output layers. If `None` the default + initializer of the respective dense layer is used. Note, only the + in-place initializers, i.e. ending with an underscore "_" are allowed. + output_layer_weights_initializer_config: Configuration to pass into the + initializer defined in `output_layer_weights_initializer`. 
+ output_layer_bias_initializer: The initializer function or class to use for + bias initialization in the output layers. If `None` the default + initializer of the respective dense layer is used. Note, only the + in-place initializers, i.e. ending with an underscore "_" are allowed. + output_layer_bias_initializer_config: Configuration to pass into the + initializer defined in `output_layer_bias_initializer`. + std_init: Initial value of the Gaussian standard deviation before + optimization. Defaults to `0.1`. + """ + super().__init__() + assert input_dim > 0 + + self.input_dim = input_dim + + hidden_activation = get_activation_fn( + hidden_layer_activation, framework="torch" + ) + hidden_weights_initializer = get_initializer_fn( + hidden_layer_weights_initializer, framework="torch" + ) + hidden_bias_initializer = get_initializer_fn( + hidden_layer_bias_initializer, framework="torch" + ) + output_weights_initializer = get_initializer_fn( + output_weights_initializer, framework="torch" + ) + output_bias_initializer = get_initializer_fn( + output_bias_initializer, framework="torch" + ) + + layers = [] + + dims = ( + [self.input_dim] + + list(hidden_layer_dims) + + ([output_dim] if output_dim else []) + ) + for i in range(0, len(dims) - 1): + # Whether we are already processing the last (special) output layer. + is_output_layer = output_dim is not None and i == len(dims) - 2 + + layer = NoisyLinear( + dims[i], + dims[i + 1], + bias=output_use_bias if is_output_layer else hidden_layer_use_bias, + std_init=std_init, + ) + + # Initialize layers, if necessary. + if is_output_layer: + # Initialize output layer weigths if necessary. + if output_weights_initializer: + output_weights_initializer( + layer.weight, **output_weights_initializer_config or {} + ) + # Initialize output layer bias if necessary. + if output_bias_initializer: + output_bias_initializer( + layer.bias, **output_bias_initializer_config or {} + ) + # Must be hidden. + else: + # Initialize hidden layer weights if necessary. + if hidden_layer_weights_initializer: + hidden_weights_initializer( + layer.weight, **hidden_layer_weights_initializer_config or {} + ) + # Initialize hidden layer bias if necessary. + if hidden_layer_bias_initializer: + hidden_bias_initializer( + layer.bias, **hidden_layer_bias_initializer_config or {} + ) + + layers.append(layer) + + # We are still in the hidden layer section: Possibly add layernorm and + # hidden activation. + if not is_output_layer: + # Insert a layer normalization in between layer's output and + # the activation. + if hidden_layer_use_layernorm: + layers.append(nn.LayerNorm(dims[i + 1])) + # Add the activation function. + if hidden_activation is not None: + layers.append(hidden_activation()) + + # Add output layer's (if any) activation. + output_activation = get_activation_fn(output_activation, framework="torch") + if output_dim is not None and output_activation is not None: + layers.append(output_activation()) + + self.mlp = nn.Sequential(*layers) + + self.expected_input_dtype = torch.float32 + + def forward(self, x): + return self.mlp(x.type(self.expected_input_dtype)) + + def _reset_noise(self): + # Reset the noise for all modules (layers). 
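+        # Each `NoisyLinear` layer keeps learnable (mu, sigma) tensors plus a
+        # non-trainable epsilon buffer. With factorized Gaussian noise the
+        # effective training-time weight is roughly
+        #   W = W_mu + W_sigma * outer(eps_out, eps_in), eps = sign(x) * sqrt(|x|), x ~ N(0, 1),
+        # so re-drawing epsilon below changes the perturbation without touching
+        # the learned parameters.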
+ for module in self.modules(): + if hasattr(module, "reset_noise"): + module.reset_noise() diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_rl_module.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_rl_module.py new file mode 100644 index 0000000000000000000000000000000000000000..2c6c5ee35e154b6846dcec09ac7e114921ad8224 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_rl_module.py @@ -0,0 +1,337 @@ +from typing import Dict, Union + +from ray.rllib.algorithms.dqn.dqn_rainbow_rl_module import ( + DQNRainbowRLModule, + ATOMS, + QF_LOGITS, + QF_NEXT_PREDS, + QF_PREDS, + QF_PROBS, + QF_TARGET_NEXT_PREDS, + QF_TARGET_NEXT_PROBS, +) +from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_noisy_net import ( + TorchNoisyMLPEncoder, +) +from ray.rllib.core.columns import Columns +from ray.rllib.core.models.base import Encoder, ENCODER_OUT, Model +from ray.rllib.core.rl_module.torch.torch_rl_module import TorchRLModule +from ray.rllib.core.rl_module.rl_module import RLModule +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.typing import TensorType, TensorStructType + +torch, nn = try_import_torch() + + +class DQNRainbowTorchRLModule(TorchRLModule, DQNRainbowRLModule): + framework: str = "torch" + + @override(DQNRainbowRLModule) + def setup(self): + super().setup() + + # If we use a noisy encoder. Note, only if the observation + # space is a flat space we can use a noisy encoder. + self.uses_noisy_encoder = isinstance(self.encoder, TorchNoisyMLPEncoder) + + @override(RLModule) + def _forward_inference(self, batch: Dict[str, TensorType]) -> Dict[str, TensorType]: + output = {} + + # Set the module into evaluation mode, if needed. + if self.uses_noisy and self.training: + # This sets the weigths and bias to their constant version. + self.eval() + + # Q-network forward pass. + qf_outs = self._qf(batch) + + # Get action distribution. + action_dist_cls = self.get_exploration_action_dist_cls() + action_dist = action_dist_cls.from_logits(qf_outs[QF_PREDS]) + # Note, the deterministic version of the categorical distribution + # outputs directly the `argmax` of the logits. + exploit_actions = action_dist.to_deterministic().sample() + + # In inference, we only need the exploitation actions. + output[Columns.ACTIONS] = exploit_actions + + return output + + @override(RLModule) + def _forward_exploration( + self, batch: Dict[str, TensorType], t: int + ) -> Dict[str, TensorType]: + output = {} + + # Resample the noise for the noisy layers, if needed. + if self.uses_noisy: + # We want to resample the noise everytime we step. + self._reset_noise(target=False) + if not self.training: + # Set the module into training mode. This sets + # the weigths and bias to their noisy version. + self.train(True) + + # Q-network forward pass. + qf_outs = self._qf(batch) + + # Get action distribution. + action_dist_cls = self.get_exploration_action_dist_cls() + action_dist = action_dist_cls.from_logits(qf_outs[QF_PREDS]) + # Note, the deterministic version of the categorical distribution + # outputs directly the `argmax` of the logits. + exploit_actions = action_dist.to_deterministic().sample() + + # In case of noisy networks the parameter noise is sufficient for + # variation in exploration. + if self.uses_noisy: + # Use the exploitation action (coming from the noisy network). 
+ output[Columns.ACTIONS] = exploit_actions + # Otherwise we need epsilon greedy to support exploration. + else: + # TODO (simon): Implement sampling for nested spaces. + # Update scheduler. + self.epsilon_schedule.update(t) + # Get the actual epsilon, + epsilon = self.epsilon_schedule.get_current_value() + # Apply epsilon-greedy exploration. + B = qf_outs[QF_PREDS].shape[0] + random_actions = torch.squeeze( + torch.multinomial( + (torch.nan_to_num(qf_outs[QF_PREDS], neginf=0.0) != 0.0).float(), + num_samples=1, + ), + dim=1, + ) + output[Columns.ACTIONS] = torch.where( + torch.rand((B,)) < epsilon, + random_actions, + exploit_actions, + ) + + return output + + @override(RLModule) + def _forward_train( + self, batch: Dict[str, TensorType] + ) -> Dict[str, TensorStructType]: + if self.inference_only: + raise RuntimeError( + "Trying to train a module that is not a learner module. Set the " + "flag `inference_only=False` when building the module." + ) + output = {} + + # Set module into training mode. + if self.uses_noisy and not self.training: + # This sets the weigths and bias to their noisy version. + self.train(True) + + # If we use a double-Q setup. + if self.uses_double_q: + # Then we need to make a single forward pass with both, + # current and next observations. + batch_base = { + Columns.OBS: torch.concat( + [batch[Columns.OBS], batch[Columns.NEXT_OBS]], dim=0 + ), + } + # Otherwise we can just use the current observations. + else: + batch_base = {Columns.OBS: batch[Columns.OBS]} + batch_target = {Columns.OBS: batch[Columns.NEXT_OBS]} + + # Q-network forward passes. + qf_outs = self._qf(batch_base) + if self.uses_double_q: + output[QF_PREDS], output[QF_NEXT_PREDS] = torch.chunk( + qf_outs[QF_PREDS], chunks=2, dim=0 + ) + else: + output[QF_PREDS] = qf_outs[QF_PREDS] + # The target Q-values for the next observations. + qf_target_next_outs = self.forward_target(batch_target) + output[QF_TARGET_NEXT_PREDS] = qf_target_next_outs[QF_PREDS] + # We are learning a Q-value distribution. + if self.num_atoms > 1: + # Add distribution artefacts to the output. + # Distribution support. + output[ATOMS] = qf_target_next_outs[ATOMS] + # Original logits from the Q-head. + output[QF_LOGITS] = qf_outs[QF_LOGITS] + # Probabilities of the Q-value distribution of the current state. + output[QF_PROBS] = qf_outs[QF_PROBS] + # Probabilities of the target Q-value distribution of the next state. + output[QF_TARGET_NEXT_PROBS] = qf_target_next_outs[QF_PROBS] + + return output + + @override(DQNRainbowRLModule) + def _af_dist(self, batch: Dict[str, TensorType]) -> Dict[str, TensorType]: + """Compute the advantage distribution. + + Note this distribution is identical to the Q-distribution in + case no dueling architecture is used. + + Args: + batch: A dictionary containing a tensor with the outputs of the + forward pass of the Q-head or advantage stream head. + + Returns: + A `dict` containing the support of the discrete distribution for + either Q-values or advantages (in case of a dueling architecture), + ("atoms"), the logits per action and atom and the probabilities + of the discrete distribution (per action and atom of the support). + """ + output = {} + # Distributional Q-learning uses a discrete support `z` + # to represent the action value distribution. + # TODO (simon): Check, if we still need here the device for torch. + z = torch.arange(0.0, self.num_atoms, dtype=torch.float32).to( + batch.device, + ) + # Rescale the support. 
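+        # For example (illustrative values only), with v_min=-10.0, v_max=10.0 and
+        # num_atoms=21, the rescaled support becomes {-10.0, -9.0, ..., 9.0, 10.0}.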
+ z = self.v_min + z * (self.v_max - self.v_min) / float(self.num_atoms - 1) + # Reshape the action values. + # NOTE: Handcrafted action shape. + logits_per_action_per_atom = torch.reshape( + batch, shape=(-1, self.action_space.n, self.num_atoms) + ) + # Calculate the probability for each action value atom. Note, + # the sum along action value atoms of a single action value + # must sum to one. + prob_per_action_per_atom = nn.functional.softmax( + logits_per_action_per_atom, + dim=-1, + ) + # Compute expected action value by weighted sum. + output[ATOMS] = z + output["logits"] = logits_per_action_per_atom + output["probs"] = prob_per_action_per_atom + + return output + + # TODO (simon): Test, if providing the function with a `return_probs` + # improves performance significantly. + @override(DQNRainbowRLModule) + def _qf_forward_helper( + self, + batch: Dict[str, TensorType], + encoder: Encoder, + head: Union[Model, Dict[str, Model]], + ) -> Dict[str, TensorType]: + """Computes Q-values. + + This is a helper function that takes care of all different cases, + i.e. if we use a dueling architecture or not and if we use distributional + Q-learning or not. + + Args: + batch: The batch received in the forward pass. + encoder: The encoder network to use. Here we have a single encoder + for all heads (Q or advantages and value in case of a dueling + architecture). + head: Either a head model or a dictionary of head model (dueling + architecture) containing advantage and value stream heads. + + Returns: + In case of expectation learning the Q-value predictions ("qf_preds") + and in case of distributional Q-learning in addition to the predictions + the atoms ("atoms"), the Q-value predictions ("qf_preds"), the Q-logits + ("qf_logits") and the probabilities for the support atoms ("qf_probs"). + """ + output = {} + + # Encoder forward pass. + encoder_outs = encoder(batch) + + # Do we have a dueling architecture. + if self.uses_dueling: + # Head forward passes for advantage and value stream. + qf_outs = head["af"](encoder_outs[ENCODER_OUT]) + vf_outs = head["vf"](encoder_outs[ENCODER_OUT]) + # We learn a Q-value distribution. + if self.num_atoms > 1: + # Compute the advantage stream distribution. + af_dist_output = self._af_dist(qf_outs) + # Center the advantage stream distribution. + centered_af_logits = af_dist_output["logits"] - af_dist_output[ + "logits" + ].mean(dim=1, keepdim=True) + # Calculate the Q-value distribution by adding advantage and + # value stream. + qf_logits = centered_af_logits + vf_outs.unsqueeze(dim=-1) + # Calculate probabilites for the Q-value distribution along + # the support given by the atoms. + qf_probs = nn.functional.softmax(qf_logits, dim=-1) + # Return also the support as we need it in the learner. + output[ATOMS] = af_dist_output[ATOMS] + # Calculate the Q-values by the weighted sum over the atoms. + output[QF_PREDS] = torch.sum(af_dist_output[ATOMS] * qf_probs, dim=-1) + output[QF_LOGITS] = qf_logits + output[QF_PROBS] = qf_probs + # Otherwise we learn an expectation. + else: + # Center advantages. Note, we cannot do an in-place operation here + # b/c we backpropagate through these values. See for a discussion + # https://discuss.pytorch.org/t/gradient-computation-issue-due-to- + # inplace-operation-unsure-how-to-debug-for-custom-model/170133 + # Has to be a mean for each batch element. + af_outs_mean = torch.unsqueeze( + torch.nan_to_num(qf_outs, neginf=torch.nan).nanmean(dim=1), dim=1 + ) + qf_outs = qf_outs - af_outs_mean + # Add advantage and value stream. 
Note, we broadcast here. + output[QF_PREDS] = qf_outs + vf_outs + # No dueling architecture. + else: + # Note, in this case the advantage network is the Q-network. + # Forward pass through Q-head. + qf_outs = head(encoder_outs[ENCODER_OUT]) + # We learn a Q-value distribution. + if self.num_atoms > 1: + # Note in a non-dueling architecture the advantage distribution is + # the Q-value distribution. + # Get the Q-value distribution. + qf_dist_outs = self._af_dist(qf_outs) + # Get the support of the Q-value distribution. + output[ATOMS] = qf_dist_outs[ATOMS] + # Calculate the Q-values by the weighted sum over the atoms. + output[QF_PREDS] = torch.sum( + qf_dist_outs[ATOMS] * qf_dist_outs["probs"], dim=-1 + ) + output[QF_LOGITS] = qf_dist_outs["logits"] + output[QF_PROBS] = qf_dist_outs["probs"] + # Otherwise we learn an expectation. + else: + # In this case we have a Q-head of dimension (1, action_space.n). + output[QF_PREDS] = qf_outs + + return output + + @override(DQNRainbowRLModule) + def _reset_noise(self, target: bool = False) -> None: + """Reset the noise of all noisy layers. + + Args: + target: Whether to reset the noise of the target networks. + """ + if self.uses_noisy: + if self.uses_noisy_encoder: + self.encoder._reset_noise() + self.af._reset_noise() + # If we have a dueling architecture we need to reset the noise + # of the value stream, too. + if self.uses_dueling: + self.vf._reset_noise() + # Reset the noise of the target networks, if requested. + if target: + if self.uses_noisy_encoder: + self._target_encoder._reset_noise() + self._target_af._reset_noise() + # If we have a dueling architecture we need to reset the noise + # of the value stream, too. + if self.uses_dueling: + self._target_vf._reset_noise() diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/torch_noisy_linear.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/torch_noisy_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..cba1aa86e636f9b9dc09113cf85f18cadfdb7481 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/torch_noisy_linear.py @@ -0,0 +1,147 @@ +import math +from typing import Optional, Sequence, Union +from ray.rllib.utils.framework import try_import_torch + +torch, nn = try_import_torch() + +DEVICE_TYPING = Union[torch.device, str, int] + + +class NoisyLinear(nn.Linear): + """Noisy Linear Layer. + + Presented in "Noisy Networks for Exploration", + https://arxiv.org/abs/1706.10295v3, implemented in relation to + `torchrl`'s `NoisyLinear` layer. + + A Noisy Linear Layer is a linear layer with parametric noise added to + the weights. This induced stochasticity can be used in RL networks for + the agent's policy to aid efficient exploration. The parameters of the + noise are learned with gradient descent along with any other remaining + network weights. Factorized Gaussian noise is the type of noise usually + employed. + + + Args: + in_features: Input features dimension. + out_features: Out features dimension. + bias: If `True`, a bias term will be added to the matrix + multiplication: `Ax + b`. Defaults to `True`. + device: Device of the layer. Defaults to `"cpu"`. + dtype: `dtype` of the parameters. Defaults to `None` (default `torch` + `dtype`). + std_init: Initial value of the Gaussian standard deviation before + optimization. Defaults to `0.1`. 
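+
+    Example (an illustrative sketch; shapes and values are arbitrary):
+
+        layer = NoisyLinear(16, 32, std_init=0.1)
+        layer.train()  # Noisy mode: weight = weight_mu + weight_sigma * weight_epsilon.
+        y = layer(torch.randn(8, 16))  # -> shape (8, 32)
+        layer.reset_noise()  # Draw a fresh factorized noise sample.
+        layer.eval()  # Deterministic mode: weight = weight_mu only.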
+ + """ + + def __init__( + self, + in_features: int, + out_features: int, + bias: bool = True, + device: Optional[DEVICE_TYPING] = None, + dtype: Optional[torch.dtype] = None, + std_init: float = 0.1, + ): + nn.Module.__init__(self) + self.in_features = int(in_features) + self.out_features = int(out_features) + self.std_init = std_init + + self.weight_mu = nn.Parameter( + torch.empty( + out_features, + in_features, + device=device, + dtype=dtype, + requires_grad=True, + ) + ) + self.weight_sigma = nn.Parameter( + torch.empty( + out_features, + in_features, + device=device, + dtype=dtype, + requires_grad=True, + ) + ) + self.register_buffer( + "weight_epsilon", + torch.empty(out_features, in_features, device=device, dtype=dtype), + ) + if bias: + self.bias_mu = nn.Parameter( + torch.empty( + out_features, + device=device, + dtype=dtype, + requires_grad=True, + ) + ) + self.bias_sigma = nn.Parameter( + torch.empty( + out_features, + device=device, + dtype=dtype, + requires_grad=True, + ) + ) + self.register_buffer( + "bias_epsilon", + torch.empty(out_features, device=device, dtype=dtype), + ) + else: + self.bias_mu = None + self.reset_parameters() + self.reset_noise() + self.training = True + + @torch.no_grad() + def reset_parameters(self) -> None: + # Use initialization for factorized noisy linear layers. + mu_range = 1 / math.sqrt(self.in_features) + # Initialize weight distribution parameters. + self.weight_mu.data.uniform_(-mu_range, mu_range) + self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features)) + # If bias is used initial these parameters, too. + if self.bias_mu is not None: + self.bias_mu.data.zero_() # (-mu_range, mu_range) + self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features)) + + @torch.no_grad() + def reset_noise(self) -> None: + with torch.no_grad(): + # Use factorized noise for better performance. + epsilon_in = self._scale_noise(self.in_features) + epsilon_out = self._scale_noise(self.out_features) + self.weight_epsilon.copy_(epsilon_out.outer(epsilon_in)) + if self.bias_mu is not None: + self.bias_epsilon.copy_(epsilon_out) + + @torch.no_grad() + def _scale_noise(self, size: Union[int, torch.Size, Sequence]) -> torch.Tensor: + if isinstance(size, int): + size = (size,) + x = torch.randn(*size, device=self.weight_mu.device) + return x.sign().mul_(x.abs().sqrt_()) + + @property + def weight(self) -> torch.Tensor: + if self.training: + # If in training mode, sample the noise. + return self.weight_mu + self.weight_sigma * self.weight_epsilon + else: + return self.weight_mu + + @property + def bias(self) -> Optional[torch.Tensor]: + if self.bias_mu is not None: + if self.training: + # If in training mode, sample the noise. + return self.bias_mu + self.bias_sigma * self.bias_epsilon + else: + return self.bias_mu + else: + return None diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b2adb0d57ed1d44ced601c57fa78c91bc4bede --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__init__.py @@ -0,0 +1,15 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf + +[2] Mastering Atari with Discrete World Models - 2021 +D. Hafner, T. Lillicrap, M. Norouzi, J. 
Ba +https://arxiv.org/pdf/2010.02193.pdf +""" +from ray.rllib.algorithms.dreamerv3.dreamerv3 import DreamerV3, DreamerV3Config + +__all__ = [ + "DreamerV3", + "DreamerV3Config", +] diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d92e5c98dbff44d2417759086a86dd5ce657f6e2 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ffbab0ea8b37506e4ca4b878c0b5697b2ee9033 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_catalog.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_catalog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be6491d0691f4efb27d5e235f12d8a63b24b12cc Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_catalog.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_learner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_learner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73c596d7c344df831448b8ace7058a842e5e2cca Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_learner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_rl_module.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_rl_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a75ae37bf6d520e7a03dd55fee6464c650295633 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_rl_module.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_catalog.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_catalog.py new file mode 100644 index 0000000000000000000000000000000000000000..158ecedcf75f087a11e1c859b6ff4d57f084cdcd --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_catalog.py @@ -0,0 +1,80 @@ +import gymnasium as gym + +from ray.rllib.core.models.catalog import Catalog +from ray.rllib.core.models.base import Encoder, Model +from ray.rllib.utils import override + + +class DreamerV3Catalog(Catalog): + """The Catalog class used to build all the models needed for DreamerV3 training.""" + + def __init__( + self, + observation_space: gym.Space, + action_space: gym.Space, + model_config_dict: dict, + ): + """Initializes a DreamerV3Catalog 
instance. + + Args: + observation_space: The observation space of the environment. + action_space: The action space of the environment. + model_config_dict: The model config to use. + """ + super().__init__( + observation_space=observation_space, + action_space=action_space, + model_config_dict=model_config_dict, + ) + + self.model_size = self._model_config_dict["model_size"] + self.is_img_space = len(self.observation_space.shape) in [2, 3] + self.is_gray_scale = ( + self.is_img_space and len(self.observation_space.shape) == 2 + ) + + # TODO (sven): We should work with sub-component configurations here, + # and even try replacing all current Dreamer model components with + # our default primitives. But for now, we'll construct the DreamerV3Model + # directly in our `build_...()` methods. + + @override(Catalog) + def build_encoder(self, framework: str) -> Encoder: + """Builds the World-Model's encoder network depending on the obs space.""" + if framework != "tf2": + raise NotImplementedError + + if self.is_img_space: + from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import ( + CNNAtari, + ) + + return CNNAtari(model_size=self.model_size) + else: + from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP + + return MLP(model_size=self.model_size, name="vector_encoder") + + def build_decoder(self, framework: str) -> Model: + """Builds the World-Model's decoder network depending on the obs space.""" + if framework != "tf2": + raise NotImplementedError + + if self.is_img_space: + from ray.rllib.algorithms.dreamerv3.tf.models.components import ( + conv_transpose_atari, + ) + + return conv_transpose_atari.ConvTransposeAtari( + model_size=self.model_size, + gray_scaled=self.is_gray_scale, + ) + else: + from ray.rllib.algorithms.dreamerv3.tf.models.components import ( + vector_decoder, + ) + + return vector_decoder.VectorDecoder( + model_size=self.model_size, + observation_space=self.observation_space, + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..6c23be816ff9f5e30a2cc18b70bef54678648cb6 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_learner.py @@ -0,0 +1,31 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf + +[2] Mastering Atari with Discrete World Models - 2021 +D. Hafner, T. Lillicrap, M. Norouzi, J. Ba +https://arxiv.org/pdf/2010.02193.pdf +""" +from ray.rllib.core.learner.learner import Learner +from ray.rllib.utils.annotations import ( + override, + OverrideToImplementCustomLogic_CallToSuperRecommended, +) + + +class DreamerV3Learner(Learner): + """DreamerV3 specific Learner class. + + Only implements the `after_gradient_based_update()` method to define the logic + for updating the critic EMA-copy after each training step. + """ + + @OverrideToImplementCustomLogic_CallToSuperRecommended + @override(Learner) + def after_gradient_based_update(self, *, timesteps): + super().after_gradient_based_update(timesteps=timesteps) + + # Update EMA weights of the critic. 
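+        # Conceptually, `update_ema()` blends the critic's current weights into
+        # its EMA copy, roughly `w_ema <- decay * w_ema + (1 - decay) * w`; the
+        # actual decay constant is owned by the critic network itself.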
+ for module_id, module in self.module._rl_modules.items(): + module.critic.update_ema() diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__pycache__/dreamerv3_tf_learner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__pycache__/dreamerv3_tf_learner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a687cd7693820c2948009e5af6036ce874a130d0 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__pycache__/dreamerv3_tf_learner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/__pycache__/critic_network.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/__pycache__/critic_network.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba4fc48432564f516f5ca201e3a4b85c2929b3d Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/__pycache__/critic_network.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/actor_network.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/actor_network.py new file mode 100644 index 0000000000000000000000000000000000000000..c4bc6cd9336291a88bd2825edab2c7e3dbfcc5af --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/actor_network.py @@ -0,0 +1,203 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" +import gymnasium as gym +from gymnasium.spaces import Box, Discrete +import numpy as np + +from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP +from ray.rllib.algorithms.dreamerv3.utils import ( + get_gru_units, + get_num_z_categoricals, + get_num_z_classes, +) +from ray.rllib.utils.framework import try_import_tf, try_import_tfp + +_, tf, _ = try_import_tf() +tfp = try_import_tfp() + + +class ActorNetwork(tf.keras.Model): + """The `actor` (policy net) of DreamerV3. + + Consists of a simple MLP for Discrete actions and two MLPs for cont. actions (mean + and stddev). + Also contains two scalar variables to keep track of the percentile-5 and + percentile-95 values of the computed value targets within a batch. This is used to + compute the "scaled value targets" for actor learning. These two variables decay + over time exponentially (see [1] for more details). + """ + + def __init__( + self, + *, + model_size: str = "XS", + action_space: gym.Space, + ): + """Initializes an ActorNetwork instance. + + Args: + model_size: The "Model Size" used according to [1] Appendix B. + Use None for manually setting the different network sizes. + action_space: The action space of the environment used. + """ + super().__init__(name="actor") + + self.model_size = model_size + self.action_space = action_space + + # The EMA decay variables used for the [Percentile(R, 95%) - Percentile(R, 5%)] + # diff to scale value targets for the actor loss. 
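+        # During actor learning these two EMAs scale the value targets,
+        # conceptually
+        #   scale = max(1.0, ema_pct95 - ema_pct5); scaled_targets = targets / scale,
+        # so that returns of very different magnitudes across environments yield
+        # comparable policy-gradient signals (see [1]).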
+ self.ema_value_target_pct5 = tf.Variable( + np.nan, trainable=False, name="value_target_pct5" + ) + self.ema_value_target_pct95 = tf.Variable( + np.nan, trainable=False, name="value_target_pct95" + ) + + # For discrete actions, use a single MLP that computes logits. + if isinstance(self.action_space, Discrete): + self.mlp = MLP( + model_size=self.model_size, + output_layer_size=self.action_space.n, + name="actor_mlp", + ) + # For cont. actions, use separate MLPs for Gaussian mean and stddev. + # TODO (sven): In the author's original code repo, this is NOT the case, + # inputs are pushed through a shared MLP, then only the two output linear + # layers are separate for std- and mean logits. + elif isinstance(action_space, Box): + output_layer_size = np.prod(action_space.shape) + self.mlp = MLP( + model_size=self.model_size, + output_layer_size=output_layer_size, + name="actor_mlp_mean", + ) + self.std_mlp = MLP( + model_size=self.model_size, + output_layer_size=output_layer_size, + name="actor_mlp_std", + ) + else: + raise ValueError(f"Invalid action space: {action_space}") + + # Trace self.call. + dl_type = tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32 + self.call = tf.function( + input_signature=[ + tf.TensorSpec(shape=[None, get_gru_units(model_size)], dtype=dl_type), + tf.TensorSpec( + shape=[ + None, + get_num_z_categoricals(model_size), + get_num_z_classes(model_size), + ], + dtype=dl_type, + ), + ] + )(self.call) + + def call(self, h, z): + """Performs a forward pass through this policy network. + + Args: + h: The deterministic hidden state of the sequence model. [B, dim(h)]. + z: The stochastic discrete representations of the original + observation input. [B, num_categoricals, num_classes]. + """ + # Flatten last two dims of z. + assert len(z.shape) == 3 + z_shape = tf.shape(z) + z = tf.reshape(z, shape=(z_shape[0], -1)) + assert len(z.shape) == 2 + out = tf.concat([h, z], axis=-1) + out.set_shape( + [ + None, + ( + get_num_z_categoricals(self.model_size) + * get_num_z_classes(self.model_size) + + get_gru_units(self.model_size) + ), + ] + ) + # Send h-cat-z through MLP. + action_logits = tf.cast(self.mlp(out), tf.float32) + + if isinstance(self.action_space, Discrete): + action_probs = tf.nn.softmax(action_logits) + + # Add the unimix weighting (1% uniform) to the probs. + # See [1]: "Unimix categoricals: We parameterize the categorical + # distributions for the world model representations and dynamics, as well as + # for the actor network, as mixtures of 1% uniform and 99% neural network + # output to ensure a minimal amount of probability mass on every class and + # thus keep log probabilities and KL divergences well behaved." + action_probs = 0.99 * action_probs + 0.01 * (1.0 / self.action_space.n) + + # Danijar's code does: distr = [Distr class](logits=tf.log(probs)). + # Not sure why we don't directly use the already available probs instead. + action_logits = tf.math.log(action_probs) + + # Distribution parameters are the log(probs) directly. 
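+            # As a concrete unimix example: for 4 actions with raw probs
+            # [1.0, 0.0, 0.0, 0.0], the mixed probs are
+            # 0.99 * [1, 0, 0, 0] + 0.01 * 0.25 = [0.9925, 0.0025, 0.0025, 0.0025],
+            # which keeps every log-prob finite.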
+ distr_params = action_logits + distr = self.get_action_dist_object(distr_params) + + action = tf.stop_gradient(distr.sample()) + ( + action_probs - tf.stop_gradient(action_probs) + ) + + elif isinstance(self.action_space, Box): + # Send h-cat-z through MLP to compute stddev logits for Normal dist + std_logits = tf.cast(self.std_mlp(out), tf.float32) + # minstd, maxstd taken from [1] from configs.yaml + minstd = 0.1 + maxstd = 1.0 + + # Distribution parameters are the squashed std_logits and the tanh'd + # mean logits. + # squash std_logits from (-inf, inf) to (minstd, maxstd) + std_logits = (maxstd - minstd) * tf.sigmoid(std_logits + 2.0) + minstd + mean_logits = tf.tanh(action_logits) + + distr_params = tf.concat([mean_logits, std_logits], axis=-1) + distr = self.get_action_dist_object(distr_params) + + action = distr.sample() + + return action, distr_params + + def get_action_dist_object(self, action_dist_params_T_B): + """Helper method to create an action distribution object from (T, B, ..) params. + + Args: + action_dist_params_T_B: The time-major action distribution parameters. + This could be simply the logits (discrete) or a to-be-split-in-2 + tensor for mean and stddev (continuous). + + Returns: + The tfp action distribution object, from which one can sample, compute + log probs, entropy, etc.. + """ + if isinstance(self.action_space, gym.spaces.Discrete): + # Create the distribution object using the unimix'd logits. + distr = tfp.distributions.OneHotCategorical( + logits=action_dist_params_T_B, + dtype=tf.float32, + ) + + elif isinstance(self.action_space, gym.spaces.Box): + # Compute Normal distribution from action_logits and std_logits + loc, scale = tf.split(action_dist_params_T_B, 2, axis=-1) + distr = tfp.distributions.Normal(loc=loc, scale=scale) + + # If action_space is a box with multiple dims, make individual dims + # independent. 
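+            # For example, for a Box action space of shape (3,), this sums the
+            # three per-dimension Normal log-probs into a single log-prob per
+            # action.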
+ distr = tfp.distributions.Independent(distr, len(self.action_space.shape)) + + else: + raise ValueError(f"Action space {self.action_space} not supported!") + + return distr diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/dynamics_predictor.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/dynamics_predictor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5284241754c73e213be19327d0da0d95f5c8213c Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/dynamics_predictor.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/representation_layer.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/representation_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..432a48674e2f08d65ab1187dd7e2520e50e3e159 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/representation_layer.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/sequence_model.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/sequence_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..571a8bafe063fab975a2bc79513fee67f43b63d8 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/sequence_model.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/cnn_atari.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/cnn_atari.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f7ee09b092bc7923279ac7836f15ee5db70de8 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/cnn_atari.py @@ -0,0 +1,112 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" +from typing import Optional + +from ray.rllib.algorithms.dreamerv3.utils import get_cnn_multiplier +from ray.rllib.utils.framework import try_import_tf + +_, tf, _ = try_import_tf() + + +class CNNAtari(tf.keras.Model): + """An image encoder mapping 64x64 RGB images via 4 CNN layers into a 1D space.""" + + def __init__( + self, + *, + model_size: Optional[str] = "XS", + cnn_multiplier: Optional[int] = None, + ): + """Initializes a CNNAtari instance. + + Args: + model_size: The "Model Size" used according to [1] Appendix B. + Use None for manually setting the `cnn_multiplier`. + cnn_multiplier: Optional override for the additional factor used to multiply + the number of filters with each CNN layer. 
Starting with + 1 * `cnn_multiplier` filters in the first CNN layer, the number of + filters then increases via `2*cnn_multiplier`, `4*cnn_multiplier`, till + `8*cnn_multiplier`. + """ + super().__init__(name="image_encoder") + + cnn_multiplier = get_cnn_multiplier(model_size, override=cnn_multiplier) + + # See appendix C in [1]: + # "We use a similar network architecture but employ layer normalization and + # SiLU as the activation function. For better framework support, we use + # same-padded convolutions with stride 2 and kernel size 3 instead of + # valid-padded convolutions with larger kernels ..." + # HOWEVER: In Danijar's DreamerV3 repo, kernel size=4 is used, so we use it + # here, too. + self.conv_layers = [ + tf.keras.layers.Conv2D( + filters=1 * cnn_multiplier, + kernel_size=4, + strides=(2, 2), + padding="same", + # No bias or activation due to layernorm. + activation=None, + use_bias=False, + ), + tf.keras.layers.Conv2D( + filters=2 * cnn_multiplier, + kernel_size=4, + strides=(2, 2), + padding="same", + # No bias or activation due to layernorm. + activation=None, + use_bias=False, + ), + tf.keras.layers.Conv2D( + filters=4 * cnn_multiplier, + kernel_size=4, + strides=(2, 2), + padding="same", + # No bias or activation due to layernorm. + activation=None, + use_bias=False, + ), + # .. until output is 4 x 4 x [num_filters]. + tf.keras.layers.Conv2D( + filters=8 * cnn_multiplier, + kernel_size=4, + strides=(2, 2), + padding="same", + # No bias or activation due to layernorm. + activation=None, + use_bias=False, + ), + ] + self.layer_normalizations = [] + for _ in range(len(self.conv_layers)): + self.layer_normalizations.append(tf.keras.layers.LayerNormalization()) + # -> 4 x 4 x num_filters -> now flatten. + self.flatten_layer = tf.keras.layers.Flatten(data_format="channels_last") + + @tf.function( + input_signature=[ + tf.TensorSpec( + shape=[None, 64, 64, 3], + dtype=tf.keras.mixed_precision.global_policy().compute_dtype + or tf.float32, + ) + ] + ) + def call(self, inputs): + """Performs a forward pass through the CNN Atari encoder. + + Args: + inputs: The image inputs of shape (B, 64, 64, 3). + """ + # [B, h, w] -> grayscale. + if len(inputs.shape) == 3: + inputs = tf.expand_dims(inputs, -1) + out = inputs + for conv_2d, layer_norm in zip(self.conv_layers, self.layer_normalizations): + out = tf.nn.silu(layer_norm(inputs=conv_2d(out))) + assert out.shape[1] == 4 and out.shape[2] == 4 + return self.flatten_layer(out) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7cb6de93f97c4268d9067217b7be60624f0073 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor.py @@ -0,0 +1,112 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. 
Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" +from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP +from ray.rllib.algorithms.dreamerv3.tf.models.components.reward_predictor_layer import ( + RewardPredictorLayer, +) +from ray.rllib.algorithms.dreamerv3.utils import ( + get_gru_units, + get_num_z_categoricals, + get_num_z_classes, +) +from ray.rllib.utils.framework import try_import_tf + +_, tf, _ = try_import_tf() + + +class RewardPredictor(tf.keras.Model): + """Wrapper of MLP and RewardPredictorLayer to predict rewards for the world model. + + Predicted rewards are used to produce "dream data" to learn the policy in. + """ + + def __init__( + self, + *, + model_size: str = "XS", + num_buckets: int = 255, + lower_bound: float = -20.0, + upper_bound: float = 20.0, + ): + """Initializes a RewardPredictor instance. + + Args: + model_size: The "Model Size" used according to [1] Appendinx B. + Determines the exact size of the underlying MLP. + num_buckets: The number of buckets to create. Note that the number of + possible symlog'd outcomes from the used distribution is + `num_buckets` + 1: + lower_bound --bucket-- o[1] --bucket-- o[2] ... --bucket-- upper_bound + o=outcomes + lower_bound=o[0] + upper_bound=o[num_buckets] + lower_bound: The symlog'd lower bound for a possible reward value. + Note that a value of -20.0 here already allows individual (actual env) + rewards to be as low as -400M. Buckets will be created between + `lower_bound` and `upper_bound`. + upper_bound: The symlog'd upper bound for a possible reward value. + Note that a value of +20.0 here already allows individual (actual env) + rewards to be as high as 400M. Buckets will be created between + `lower_bound` and `upper_bound`. + """ + super().__init__(name="reward_predictor") + self.model_size = model_size + + self.mlp = MLP( + model_size=model_size, + output_layer_size=None, + ) + self.reward_layer = RewardPredictorLayer( + num_buckets=num_buckets, + lower_bound=lower_bound, + upper_bound=upper_bound, + ) + + # Trace self.call. + dl_type = tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32 + self.call = tf.function( + input_signature=[ + tf.TensorSpec(shape=[None, get_gru_units(model_size)], dtype=dl_type), + tf.TensorSpec( + shape=[ + None, + get_num_z_categoricals(model_size), + get_num_z_classes(model_size), + ], + dtype=dl_type, + ), + ] + )(self.call) + + def call(self, h, z): + """Computes the expected reward using N equal sized buckets of possible values. + + Args: + h: The deterministic hidden state of the sequence model. [B, dim(h)]. + z: The stochastic discrete representations of the original + observation input. [B, num_categoricals, num_classes]. + """ + # Flatten last two dims of z. + assert len(z.shape) == 3 + z_shape = tf.shape(z) + z = tf.reshape(z, shape=(z_shape[0], -1)) + assert len(z.shape) == 2 + out = tf.concat([h, z], axis=-1) + out.set_shape( + [ + None, + ( + get_num_z_categoricals(self.model_size) + * get_num_z_classes(self.model_size) + + get_gru_units(self.model_size) + ), + ] + ) + # Send h-cat-z through MLP. + out = self.mlp(out) + # Return a) mean reward OR b) a tuple: (mean reward, logits over the reward + # buckets). 
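+        # The returned mean is a probability-weighted sum over `num_buckets`
+        # evenly spaced values in [lower_bound, upper_bound] (symlog space);
+        # e.g. a bound of 20.0 corresponds to environment-scale rewards of up to
+        # roughly symexp(20.0) = exp(20.0) - 1 ≈ 4.85e8.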
+ return self.reward_layer(out) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor_layer.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..d68f62cb6780f2ef044bb8f091c727b49b16e390 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor_layer.py @@ -0,0 +1,110 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf + +[2] Mastering Atari with Discrete World Models - 2021 +D. Hafner, T. Lillicrap, M. Norouzi, J. Ba +https://arxiv.org/pdf/2010.02193.pdf +""" +from ray.rllib.utils.framework import try_import_tf + +_, tf, _ = try_import_tf() + + +class RewardPredictorLayer(tf.keras.layers.Layer): + """A layer outputting reward predictions using K bins and two-hot encoding. + + This layer is used in two models in DreamerV3: The reward predictor of the world + model and the value function. K is 255 by default (see [1]) and doesn't change + with the model size. + + Possible predicted reward/values range from symexp(-20.0) to symexp(20.0), which + should cover any possible environment. Outputs of this layer are generated by + generating logits/probs via a single linear layer, then interpreting the probs + as weights for a weighted average of the different possible reward (binned) values. + """ + + def __init__( + self, + *, + num_buckets: int = 255, + lower_bound: float = -20.0, + upper_bound: float = 20.0, + trainable: bool = True, + ): + """Initializes a RewardPredictorLayer instance. + + Args: + num_buckets: The number of buckets to create. Note that the number of + possible symlog'd outcomes from the used distribution is + `num_buckets` + 1: + lower_bound --bucket-- o[1] --bucket-- o[2] ... --bucket-- upper_bound + o=outcomes + lower_bound=o[0] + upper_bound=o[num_buckets] + lower_bound: The symlog'd lower bound for a possible reward value. + Note that a value of -20.0 here already allows individual (actual env) + rewards to be as low as -400M. Buckets will be created between + `lower_bound` and `upper_bound`. + upper_bound: The symlog'd upper bound for a possible reward value. + Note that a value of +20.0 here already allows individual (actual env) + rewards to be as high as 400M. Buckets will be created between + `lower_bound` and `upper_bound`. + """ + self.num_buckets = num_buckets + super().__init__(name=f"reward_layer_{self.num_buckets}buckets") + + self.lower_bound = lower_bound + self.upper_bound = upper_bound + self.reward_buckets_layer = tf.keras.layers.Dense( + units=self.num_buckets, + activation=None, + # From [1]: + # "We further noticed that the randomly initialized reward predictor and + # critic networks at the start of training can result in large predicted + # rewards that can delay the onset of learning. We initialize the output + # weights of the reward predictor and critic to zeros, which effectively + # alleviates the problem and accelerates early learning." + kernel_initializer="zeros", + bias_initializer="zeros", # zero-bias is default anyways + trainable=trainable, + ) + + def call(self, inputs): + """Computes the expected reward using N equal sized buckets of possible values. + + Args: + inputs: The input tensor for the layer, which computes the reward bucket + weights (logits). [B, dim]. 
+ + Returns: + A tuple consisting of the expected rewards and the logits that parameterize + the tfp `FiniteDiscrete` distribution object. To get the individual bucket + probs, do `[FiniteDiscrete object].probs`. + """ + # Compute the `num_buckets` weights. + assert len(inputs.shape) == 2 + logits = tf.cast(self.reward_buckets_layer(inputs), tf.float32) + # out=[B, `num_buckets`] + + # Compute the expected(!) reward using the formula: + # `softmax(Linear(x))` [vectordot] `possible_outcomes`, where + # `possible_outcomes` is the even-spaced (binned) encoding of all possible + # symexp'd reward/values. + # [2]: "The mean of the reward predictor pφ(ˆrt | zˆt) is used as reward + # sequence rˆ1:H." + probs = tf.nn.softmax(logits) + possible_outcomes = tf.linspace( + self.lower_bound, + self.upper_bound, + self.num_buckets, + ) + # probs=possible_outcomes=[B, `num_buckets`] + + # Simple vector dot product (over last dim) to get the mean reward + # weighted sum, where all weights sum to 1.0. + expected_rewards = tf.reduce_sum(probs * possible_outcomes, axis=-1) + # expected_rewards=[B] + + return expected_rewards, logits diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/sequence_model.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/sequence_model.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9666029ce30de815b817e2c956a0bae97b816d --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/sequence_model.py @@ -0,0 +1,144 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" +from typing import Optional + +import gymnasium as gym +import numpy as np + +from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP +from ray.rllib.algorithms.dreamerv3.utils import ( + get_gru_units, + get_num_z_classes, + get_num_z_categoricals, +) +from ray.rllib.utils.framework import try_import_tf + +_, tf, _ = try_import_tf() + + +class SequenceModel(tf.keras.Model): + """The "sequence model" of the RSSM, computing ht+1 given (ht, zt, at). + + Note: The "internal state" always consists of: + The actions `a` (initially, this is a zeroed-out action), `h`-states (deterministic, + continuous), and `z`-states (stochastic, discrete). + There are two versions of z-states: "posterior" for world model training and "prior" + for creating the dream data. + + Initial internal state values (`a`, `h`, and `z`) are used where ever a new episode + starts within a batch row OR at the beginning of each train batch's B rows, + regardless of whether there was an actual episode boundary or not. Thus, internal + states are not required to be stored in or retrieved from the replay buffer AND + retrieved batches from the buffer must not be zero padded. + + Initial `a` is the zero "one hot" action, e.g. [0.0, 0.0] for Discrete(2), initial + `h` is a separate learned variable, and initial `z` are computed by the "dynamics" + (or "prior") net, using only the initial-h state as input. + + The GRU in this SequenceModel always produces the next h-state, then. + """ + + def __init__( + self, + *, + model_size: Optional[str] = "XS", + action_space: gym.Space, + num_gru_units: Optional[int] = None, + ): + """Initializes a SequenceModel instance. + + Args: + model_size: The "Model Size" used according to [1] Appendinx B. 
+ Use None for manually setting the number of GRU units used. + action_space: The action space of the environment used. + num_gru_units: Overrides the number of GRU units (dimension of the h-state). + If None, use the value given through `model_size` + (see [1] Appendix B). + """ + super().__init__(name="sequence_model") + + self.model_size = model_size + self.action_space = action_space + num_gru_units = get_gru_units(self.model_size, override=num_gru_units) + + # In Danijar's code, there is an additional layer (units=[model_size]) + # prior to the GRU (but always only with 1 layer), which is not mentioned in + # the paper. + self.pre_gru_layer = MLP( + num_dense_layers=1, + model_size=self.model_size, + output_layer_size=None, + ) + self.gru_unit = tf.keras.layers.GRU( + num_gru_units, + return_sequences=False, + return_state=False, + # Note: Changing these activations is most likely a bad idea! + # In experiments, setting one of both of them to silu deteriorated + # performance significantly. + # activation=tf.nn.silu, + # recurrent_activation=tf.nn.silu, + ) + + # Trace self.call. + dl_type = tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32 + self.call = tf.function( + input_signature=[ + tf.TensorSpec( + shape=[None] + + ( + [action_space.n] + if isinstance(action_space, gym.spaces.Discrete) + else list(action_space.shape) + ), + dtype=dl_type, + ), + tf.TensorSpec(shape=[None, num_gru_units], dtype=dl_type), + tf.TensorSpec( + shape=[ + None, + get_num_z_categoricals(self.model_size), + get_num_z_classes(self.model_size), + ], + dtype=dl_type, + ), + ] + )(self.call) + + def call(self, a, h, z): + """ + + Args: + a: The previous action (already one-hot'd if applicable). (B, ...). + h: The previous deterministic hidden state of the sequence model. + (B, num_gru_units) + z: The previous stochastic discrete representations of the original + observation input. (B, num_categoricals, num_classes_per_categorical). + """ + # Flatten last two dims of z. + z_shape = tf.shape(z) + z = tf.reshape(z, shape=(z_shape[0], -1)) + out = tf.concat([z, a], axis=-1) + out.set_shape( + [ + None, + ( + get_num_z_categoricals(self.model_size) + * get_num_z_classes(self.model_size) + + ( + self.action_space.n + if isinstance(self.action_space, gym.spaces.Discrete) + else int(np.prod(self.action_space.shape)) + ) + ), + ] + ) + # Pass through pre-GRU layer. + out = self.pre_gru_layer(out) + # Pass through (batch-major) GRU (expand axis=1 as the time axis). + h_next = self.gru_unit(tf.expand_dims(out, axis=1), initial_state=h) + # Return the GRU's output (the next h-state). + return h_next diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/disagree_networks.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/disagree_networks.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc43d1e251f1fc713d6fc014a9d2e2c834f83a8 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/disagree_networks.py @@ -0,0 +1,94 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. 
Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" + +from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP +from ray.rllib.algorithms.dreamerv3.tf.models.components.representation_layer import ( + RepresentationLayer, +) +from ray.rllib.utils.framework import try_import_tf, try_import_tfp + +_, tf, _ = try_import_tf() +tfp = try_import_tfp() + + +class DisagreeNetworks(tf.keras.Model): + """Predict the RSSM's z^(t+1), given h(t), z^(t), and a(t). + + Disagreement (stddev) between the N networks in this model on what the next z^ would + be are used to produce intrinsic rewards for enhanced, curiosity-based exploration. + + TODO + """ + + def __init__(self, *, num_networks, model_size, intrinsic_rewards_scale): + super().__init__(name="disagree_networks") + + self.model_size = model_size + self.num_networks = num_networks + self.intrinsic_rewards_scale = intrinsic_rewards_scale + + self.mlps = [] + self.representation_layers = [] + + for _ in range(self.num_networks): + self.mlps.append( + MLP( + model_size=self.model_size, + output_layer_size=None, + trainable=True, + ) + ) + self.representation_layers.append( + RepresentationLayer(model_size=self.model_size, name="disagree") + ) + + def call(self, inputs, z, a, training=None): + return self.forward_train(a=a, h=inputs, z=z) + + def compute_intrinsic_rewards(self, h, z, a): + forward_train_outs = self.forward_train(a=a, h=h, z=z) + B = tf.shape(h)[0] + + # Intrinsic rewards are computed as: + # Stddev (between the different nets) of the 32x32 discrete, stochastic + # probabilities. Meaning that if the larger the disagreement + # (stddev) between the nets on what the probabilities for the different + # classes should be, the higher the intrinsic reward. + z_predicted_probs_N_B = forward_train_outs["z_predicted_probs_N_HxB"] + N = len(z_predicted_probs_N_B) + z_predicted_probs_N_B = tf.stack(z_predicted_probs_N_B, axis=0) + # Flatten z-dims (num_categoricals x num_classes). + z_predicted_probs_N_B = tf.reshape(z_predicted_probs_N_B, shape=(N, B, -1)) + + # Compute stddevs over all disagree nets (axis=0). + # Mean over last axis ([num categoricals] x [num classes] folded axis). + stddevs_B_mean = tf.reduce_mean( + tf.math.reduce_std(z_predicted_probs_N_B, axis=0), + axis=-1, + ) + # TEST: + stddevs_B_mean -= tf.reduce_mean(stddevs_B_mean) + # END TEST + return { + "rewards_intrinsic": stddevs_B_mean * self.intrinsic_rewards_scale, + "forward_train_outs": forward_train_outs, + } + + def forward_train(self, a, h, z): + HxB = tf.shape(h)[0] + # Fold z-dims. + z = tf.reshape(z, shape=(HxB, -1)) + # Concat all input components (h, z, and a). + inputs_ = tf.stop_gradient(tf.concat([h, z, a], axis=-1)) + + z_predicted_probs_N_HxB = [ + repr(mlp(inputs_))[1] # [0]=sample; [1]=returned probs + for mlp, repr in zip(self.mlps, self.representation_layers) + ] + # shape=(N, HxB, [num categoricals], [num classes]); N=number of disagree nets. + # HxB -> folded horizon_H x batch_size_B (from dreamed data). 
+ + return {"z_predicted_probs_N_HxB": z_predicted_probs_N_HxB} diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/dreamer_model.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/dreamer_model.py new file mode 100644 index 0000000000000000000000000000000000000000..e74a283da31d25cdb4edba88d75bca26d71a595b --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/dreamer_model.py @@ -0,0 +1,606 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" +import re + +import gymnasium as gym +import numpy as np + +from ray.rllib.algorithms.dreamerv3.tf.models.disagree_networks import DisagreeNetworks +from ray.rllib.algorithms.dreamerv3.tf.models.actor_network import ActorNetwork +from ray.rllib.algorithms.dreamerv3.tf.models.critic_network import CriticNetwork +from ray.rllib.algorithms.dreamerv3.tf.models.world_model import WorldModel +from ray.rllib.algorithms.dreamerv3.utils import ( + get_gru_units, + get_num_z_categoricals, + get_num_z_classes, +) +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.tf_utils import inverse_symlog + +_, tf, _ = try_import_tf() + + +class DreamerModel(tf.keras.Model): + """The main tf-keras model containing all necessary components for DreamerV3. + + Includes: + - The world model with encoder, decoder, sequence-model (RSSM), dynamics + (generates prior z-state), and "posterior" model (generates posterior z-state). + Predicts env dynamics and produces dreamed trajectories for actor- and critic + learning. + - The actor network (policy). + - The critic network for value function prediction. + """ + + def __init__( + self, + *, + model_size: str = "XS", + action_space: gym.Space, + world_model: WorldModel, + actor: ActorNetwork, + critic: CriticNetwork, + horizon: int, + gamma: float, + use_curiosity: bool = False, + intrinsic_rewards_scale: float = 0.1, + ): + """Initializes a DreamerModel instance. + + Args: + model_size: The "Model Size" used according to [1] Appendinx B. + Use None for manually setting the different network sizes. + action_space: The action space of the environment used. + world_model: The WorldModel component. + actor: The ActorNetwork component. + critic: The CriticNetwork component. + horizon: The dream horizon to use when creating dreamed trajectories. 
+ """ + super().__init__(name="dreamer_model") + + self.model_size = model_size + self.action_space = action_space + self.use_curiosity = use_curiosity + + self.world_model = world_model + self.actor = actor + self.critic = critic + + self.horizon = horizon + self.gamma = gamma + self._comp_dtype = ( + tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32 + ) + + self.disagree_nets = None + if self.use_curiosity: + self.disagree_nets = DisagreeNetworks( + num_networks=8, + model_size=self.model_size, + intrinsic_rewards_scale=intrinsic_rewards_scale, + ) + + self.dream_trajectory = tf.function( + input_signature=[ + { + "h": tf.TensorSpec( + shape=[ + None, + get_gru_units(self.model_size), + ], + dtype=self._comp_dtype, + ), + "z": tf.TensorSpec( + shape=[ + None, + get_num_z_categoricals(self.model_size), + get_num_z_classes(self.model_size), + ], + dtype=self._comp_dtype, + ), + }, + tf.TensorSpec(shape=[None], dtype=tf.bool), + ] + )(self.dream_trajectory) + + def call( + self, + inputs, + observations, + actions, + is_first, + start_is_terminated_BxT, + gamma, + ): + """Main call method for building this model in order to generate its variables. + + Note: This method should NOT be used by users directly. It's purpose is only to + perform all forward passes necessary to define all variables of the DreamerV3. + """ + + # Forward passes through all models are enough to build all trainable and + # non-trainable variables: + + # World model. + results = self.world_model.forward_train( + observations, + actions, + is_first, + ) + # Actor. + _, distr_params = self.actor( + h=results["h_states_BxT"], + z=results["z_posterior_states_BxT"], + ) + # Critic. + values, _ = self.critic( + h=results["h_states_BxT"], + z=results["z_posterior_states_BxT"], + use_ema=tf.convert_to_tensor(False), + ) + + # Dream pipeline. + dream_data = self.dream_trajectory( + start_states={ + "h": results["h_states_BxT"], + "z": results["z_posterior_states_BxT"], + }, + start_is_terminated=start_is_terminated_BxT, + ) + + return { + "world_model_fwd": results, + "dream_data": dream_data, + "actions": actions, + "values": values, + } + + @tf.function + def forward_inference(self, observations, previous_states, is_first, training=None): + """Performs a (non-exploring) action computation step given obs and states. + + Note that all input data should not have a time rank (only a batch dimension). + + Args: + observations: The current environment observation with shape (B, ...). + previous_states: Dict with keys `a`, `h`, and `z` used as input to the RSSM + to produce the next h-state, from which then to compute the action + using the actor network. All values in the dict should have shape + (B, ...) (no time rank). + is_first: Batch of is_first flags. These should be True if a new episode + has been started at the current timestep (meaning `observations` is the + reset observation from the environment). + """ + # Perform one step in the world model (starting from `previous_state` and + # using the observations to yield a current (posterior) state). + states = self.world_model.forward_inference( + observations=observations, + previous_states=previous_states, + is_first=is_first, + ) + # Compute action using our actor network and the current states. + _, distr_params = self.actor(h=states["h"], z=states["z"]) + # Use the mode of the distribution (Discrete=argmax, Normal=mean). 
+ distr = self.actor.get_action_dist_object(distr_params) + actions = distr.mode() + return actions, {"h": states["h"], "z": states["z"], "a": actions} + + @tf.function + def forward_exploration( + self, observations, previous_states, is_first, training=None + ): + """Performs an exploratory action computation step given obs and states. + + Note that all input data should not have a time rank (only a batch dimension). + + Args: + observations: The current environment observation with shape (B, ...). + previous_states: Dict with keys `a`, `h`, and `z` used as input to the RSSM + to produce the next h-state, from which then to compute the action + using the actor network. All values in the dict should have shape + (B, ...) (no time rank). + is_first: Batch of is_first flags. These should be True if a new episode + has been started at the current timestep (meaning `observations` is the + reset observation from the environment). + """ + # Perform one step in the world model (starting from `previous_state` and + # using the observations to yield a current (posterior) state). + states = self.world_model.forward_inference( + observations=observations, + previous_states=previous_states, + is_first=is_first, + ) + # Compute action using our actor network and the current states. + actions, _ = self.actor(h=states["h"], z=states["z"]) + return actions, {"h": states["h"], "z": states["z"], "a": actions} + + def forward_train(self, observations, actions, is_first): + """Performs a training forward pass given observations and actions. + + Note that all input data must have a time rank (batch-major: [B, T, ...]). + + Args: + observations: The environment observations with shape (B, T, ...). Thus, + the batch has B rows of T timesteps each. Note that it's ok to have + episode boundaries (is_first=True) within a batch row. DreamerV3 will + simply insert an initial state before these locations and continue the + sequence modelling (with the RSSM). Hence, there will be no zero + padding. + actions: The actions actually taken in the environment with shape + (B, T, ...). See `observations` docstring for details on how B and T are + handled. + is_first: Batch of is_first flags. These should be True: + - if a new episode has been started at the current timestep (meaning + `observations` is the reset observation from the environment). + - in each batch row at T=0 (first timestep of each of the B batch + rows), regardless of whether the actual env had an episode boundary + there or not. + """ + return self.world_model.forward_train( + observations=observations, + actions=actions, + is_first=is_first, + ) + + @tf.function + def get_initial_state(self): + """Returns the (current) initial state of the dreamer model (a, h-, z-states). + + An initial state is generated using the previous action, the tanh of the + (learned) h-state variable and the dynamics predictor (or "prior net") to + compute z^0 from h0. In this last step, it is important that we do NOT sample + the z^-state (as we would usually do during dreaming), but rather take the mode + (argmax, then one-hot again). 
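+        Returns:
+            A dict with keys `h` (shape (1, num_gru_units)), `z` (shape
+            (1, num_categoricals, num_classes)), and `a` (the all-zeros action of
+            shape (1, action_dim)).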
+ """ + states = self.world_model.get_initial_state() + + action_dim = ( + self.action_space.n + if isinstance(self.action_space, gym.spaces.Discrete) + else np.prod(self.action_space.shape) + ) + states["a"] = tf.zeros( + ( + 1, + action_dim, + ), + dtype=tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32, + ) + return states + + def dream_trajectory(self, start_states, start_is_terminated): + """Dreams trajectories of length H from batch of h- and z-states. + + Note that incoming data will have the shapes (BxT, ...), where the original + batch- and time-dimensions are already folded together. Beginning from this + new batch dim (BxT), we will unroll `timesteps_H` timesteps in a time-major + fashion, such that the dreamed data will have shape (H, BxT, ...). + + Args: + start_states: Dict of `h` and `z` states in the shape of (B, ...) and + (B, num_categoricals, num_classes), respectively, as + computed by a train forward pass. From each individual h-/z-state pair + in the given batch, we will branch off a dreamed trajectory of len + `timesteps_H`. + start_is_terminated: Float flags of shape (B,) indicating whether the + first timesteps of each batch row is already a terminated timestep + (given by the actual environment). + """ + # Dreamed actions (one-hot encoded for discrete actions). + a_dreamed_t0_to_H = [] + a_dreamed_dist_params_t0_to_H = [] + + h = start_states["h"] + z = start_states["z"] + + # GRU outputs. + h_states_t0_to_H = [h] + # Dynamics model outputs. + z_states_prior_t0_to_H = [z] + + # Compute `a` using actor network (already the first step uses a dreamed action, + # not a sampled one). + a, a_dist_params = self.actor( + # We have to stop the gradients through the states. B/c we are using a + # differentiable Discrete action distribution (straight through gradients + # with `a = stop_gradient(sample(probs)) + probs - stop_gradient(probs)`, + # we otherwise would add dependencies of the `-log(pi(a|s))` REINFORCE loss + # term on actions further back in the trajectory. + h=tf.stop_gradient(h), + z=tf.stop_gradient(z), + ) + a_dreamed_t0_to_H.append(a) + a_dreamed_dist_params_t0_to_H.append(a_dist_params) + + for i in range(self.horizon): + # Move one step in the dream using the RSSM. + h = self.world_model.sequence_model(a=a, h=h, z=z) + h_states_t0_to_H.append(h) + + # Compute prior z using dynamics model. + z, _ = self.world_model.dynamics_predictor(h=h) + z_states_prior_t0_to_H.append(z) + + # Compute `a` using actor network. + a, a_dist_params = self.actor( + h=tf.stop_gradient(h), + z=tf.stop_gradient(z), + ) + a_dreamed_t0_to_H.append(a) + a_dreamed_dist_params_t0_to_H.append(a_dist_params) + + h_states_H_B = tf.stack(h_states_t0_to_H, axis=0) # (T, B, ...) + h_states_HxB = tf.reshape(h_states_H_B, [-1] + h_states_H_B.shape.as_list()[2:]) + + z_states_prior_H_B = tf.stack(z_states_prior_t0_to_H, axis=0) # (T, B, ...) + z_states_prior_HxB = tf.reshape( + z_states_prior_H_B, [-1] + z_states_prior_H_B.shape.as_list()[2:] + ) + + a_dreamed_H_B = tf.stack(a_dreamed_t0_to_H, axis=0) # (T, B, ...) + a_dreamed_dist_params_H_B = tf.stack(a_dreamed_dist_params_t0_to_H, axis=0) + + # Compute r using reward predictor. + r_dreamed_HxB, _ = self.world_model.reward_predictor( + h=h_states_HxB, z=z_states_prior_HxB + ) + r_dreamed_H_B = tf.reshape( + inverse_symlog(r_dreamed_HxB), shape=[self.horizon + 1, -1] + ) + + # Compute intrinsic rewards. 
+ if self.use_curiosity: + results_HxB = self.disagree_nets.compute_intrinsic_rewards( + h=h_states_HxB, + z=z_states_prior_HxB, + a=tf.reshape(a_dreamed_H_B, [-1] + a_dreamed_H_B.shape.as_list()[2:]), + ) + # TODO (sven): Wrong? -> Cut out last timestep as we always predict z-states + # for the NEXT timestep and derive ri (for the NEXT timestep) from the + # disagreement between our N disagreee nets. + r_intrinsic_H_B = tf.reshape( + results_HxB["rewards_intrinsic"], shape=[self.horizon + 1, -1] + )[ + 1: + ] # cut out first ts instead + curiosity_forward_train_outs = results_HxB["forward_train_outs"] + del results_HxB + + # Compute continues using continue predictor. + c_dreamed_HxB, _ = self.world_model.continue_predictor( + h=h_states_HxB, + z=z_states_prior_HxB, + ) + c_dreamed_H_B = tf.reshape(c_dreamed_HxB, [self.horizon + 1, -1]) + # Force-set first `continue` flags to False iff `start_is_terminated`. + # Note: This will cause the loss-weights for this row in the batch to be + # completely zero'd out. In general, we don't use dreamed data past any + # predicted (or actual first) continue=False flags. + c_dreamed_H_B = tf.concat( + [ + 1.0 + - tf.expand_dims( + tf.cast(start_is_terminated, tf.float32), + 0, + ), + c_dreamed_H_B[1:], + ], + axis=0, + ) + + # Loss weights for each individual dreamed timestep. Zero-out all timesteps + # that lie past continue=False flags. B/c our world model does NOT learn how + # to skip terminal/reset episode boundaries, dreamed data crossing such a + # boundary should not be used for critic/actor learning either. + dream_loss_weights_H_B = ( + tf.math.cumprod(self.gamma * c_dreamed_H_B, axis=0) / self.gamma + ) + + # Compute the value estimates. + v, v_symlog_dreamed_logits_HxB = self.critic( + h=h_states_HxB, + z=z_states_prior_HxB, + use_ema=False, + ) + v_dreamed_HxB = inverse_symlog(v) + v_dreamed_H_B = tf.reshape(v_dreamed_HxB, shape=[self.horizon + 1, -1]) + + v_symlog_dreamed_ema_HxB, _ = self.critic( + h=h_states_HxB, + z=z_states_prior_HxB, + use_ema=True, + ) + v_symlog_dreamed_ema_H_B = tf.reshape( + v_symlog_dreamed_ema_HxB, shape=[self.horizon + 1, -1] + ) + + ret = { + "h_states_t0_to_H_BxT": h_states_H_B, + "z_states_prior_t0_to_H_BxT": z_states_prior_H_B, + "rewards_dreamed_t0_to_H_BxT": r_dreamed_H_B, + "continues_dreamed_t0_to_H_BxT": c_dreamed_H_B, + "actions_dreamed_t0_to_H_BxT": a_dreamed_H_B, + "actions_dreamed_dist_params_t0_to_H_BxT": a_dreamed_dist_params_H_B, + "values_dreamed_t0_to_H_BxT": v_dreamed_H_B, + "values_symlog_dreamed_logits_t0_to_HxBxT": v_symlog_dreamed_logits_HxB, + "v_symlog_dreamed_ema_t0_to_H_BxT": v_symlog_dreamed_ema_H_B, + # Loss weights for critic- and actor losses. + "dream_loss_weights_t0_to_H_BxT": dream_loss_weights_H_B, + } + + if self.use_curiosity: + ret["rewards_intrinsic_t1_to_H_B"] = r_intrinsic_H_B + ret.update(curiosity_forward_train_outs) + + if isinstance(self.action_space, gym.spaces.Discrete): + ret["actions_ints_dreamed_t0_to_H_B"] = tf.argmax(a_dreamed_H_B, axis=-1) + + return ret + + def dream_trajectory_with_burn_in( + self, + *, + start_states, + timesteps_burn_in: int, + timesteps_H: int, + observations, # [B, >=timesteps_burn_in] + actions, # [B, timesteps_burn_in (+timesteps_H)?] + use_sampled_actions_in_dream: bool = False, + use_random_actions_in_dream: bool = False, + ): + """Dreams trajectory from N initial observations and initial states. + + Note: This is only used for reporting and debugging, not for actual world-model + or policy training. 
+ + Args: + start_states: The batch of start states (dicts with `a`, `h`, and `z` keys) + to begin dreaming with. These are used to compute the first h-state + using the sequence model. + timesteps_burn_in: For how many timesteps should be use the posterior + z-states (computed by the posterior net and actual observations from + the env)? + timesteps_H: For how many timesteps should we dream using the prior + z-states (computed by the dynamics (prior) net and h-states only)? + Note that the total length of the returned trajectories will + be `timesteps_burn_in` + `timesteps_H`. + observations: The batch (B, T, ...) of observations (to be used only during + burn-in over `timesteps_burn_in` timesteps). + actions: The batch (B, T, ...) of actions to use during a) burn-in over the + first `timesteps_burn_in` timesteps and - possibly - b) during + actual dreaming, iff use_sampled_actions_in_dream=True. + If applicable, actions must already be one-hot'd. + use_sampled_actions_in_dream: If True, instead of using our actor network + to compute fresh actions, we will use the one provided via the `actions` + argument. Note that in the latter case, the `actions` time dimension + must be at least `timesteps_burn_in` + `timesteps_H` long. + use_random_actions_in_dream: Whether to use randomly sampled actions in the + dream. Note that this does not apply to the burn-in phase, during which + we will always use the actions given in the `actions` argument. + """ + assert not (use_sampled_actions_in_dream and use_random_actions_in_dream) + + B = observations.shape[0] + + # Produce initial N internal posterior states (burn-in) using the given + # observations: + states = start_states + for i in range(timesteps_burn_in): + states = self.world_model.forward_inference( + observations=observations[:, i], + previous_states=states, + is_first=tf.fill((B,), 1.0 if i == 0 else 0.0), + ) + states["a"] = actions[:, i] + + # Start producing the actual dream, using prior states and either the given + # actions, dreamed, or random ones. + h_states_t0_to_H = [states["h"]] + z_states_prior_t0_to_H = [states["z"]] + a_t0_to_H = [states["a"]] + + for j in range(timesteps_H): + # Compute next h using sequence model. + h = self.world_model.sequence_model( + a=states["a"], + h=states["h"], + z=states["z"], + ) + h_states_t0_to_H.append(h) + # Compute z from h, using the dynamics model (we don't have an actual + # observation at this timestep). + z, _ = self.world_model.dynamics_predictor(h=h) + z_states_prior_t0_to_H.append(z) + + # Compute next dreamed action or use sampled one or random one. + if use_sampled_actions_in_dream: + a = actions[:, timesteps_burn_in + j] + elif use_random_actions_in_dream: + if isinstance(self.action_space, gym.spaces.Discrete): + a = tf.random.randint((B,), 0, self.action_space.n, tf.int64) + a = tf.one_hot( + a, + depth=self.action_space.n, + dtype=tf.keras.mixed_precision.global_policy().compute_dtype + or tf.float32, + ) + # TODO: Support cont. action spaces with bound other than 0.0 and 1.0. + else: + a = tf.random.uniform( + shape=(B,) + self.action_space.shape, + dtype=self.action_space.dtype, + ) + else: + a, _ = self.actor(h=h, z=z) + a_t0_to_H.append(a) + + states = {"h": h, "z": z, "a": a} + + # Fold time-rank for upcoming batch-predictions (no sequences needed anymore). 
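+        # The (H+1, B, ...) stacked tensors below are reshaped to ((H+1)*B, ...) so
+        # that decoder, reward- and continue-predictor can each run in a single
+        # batched call.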
+ h_states_t0_to_H_B = tf.stack(h_states_t0_to_H, axis=0) + h_states_t0_to_HxB = tf.reshape( + h_states_t0_to_H_B, shape=[-1] + h_states_t0_to_H_B.shape.as_list()[2:] + ) + + z_states_prior_t0_to_H_B = tf.stack(z_states_prior_t0_to_H, axis=0) + z_states_prior_t0_to_HxB = tf.reshape( + z_states_prior_t0_to_H_B, + shape=[-1] + z_states_prior_t0_to_H_B.shape.as_list()[2:], + ) + + a_t0_to_H_B = tf.stack(a_t0_to_H, axis=0) + + # Compute o using decoder. + o_dreamed_t0_to_HxB = self.world_model.decoder( + h=h_states_t0_to_HxB, + z=z_states_prior_t0_to_HxB, + ) + if self.world_model.symlog_obs: + o_dreamed_t0_to_HxB = inverse_symlog(o_dreamed_t0_to_HxB) + + # Compute r using reward predictor. + r_dreamed_t0_to_HxB, _ = self.world_model.reward_predictor( + h=h_states_t0_to_HxB, + z=z_states_prior_t0_to_HxB, + ) + r_dreamed_t0_to_HxB = inverse_symlog(r_dreamed_t0_to_HxB) + # Compute continues using continue predictor. + c_dreamed_t0_to_HxB, _ = self.world_model.continue_predictor( + h=h_states_t0_to_HxB, + z=z_states_prior_t0_to_HxB, + ) + + # Return everything as time-major (H, B, ...), where H is the timesteps dreamed + # (NOT burn-in'd) and B is a batch dimension (this might or might not include + # an original time dimension from the real env, from all of which we then branch + # out our dream trajectories). + ret = { + "h_states_t0_to_H_BxT": h_states_t0_to_H_B, + "z_states_prior_t0_to_H_BxT": z_states_prior_t0_to_H_B, + # Unfold time-ranks in predictions. + "observations_dreamed_t0_to_H_BxT": tf.reshape( + o_dreamed_t0_to_HxB, [-1, B] + list(observations.shape)[2:] + ), + "rewards_dreamed_t0_to_H_BxT": tf.reshape(r_dreamed_t0_to_HxB, (-1, B)), + "continues_dreamed_t0_to_H_BxT": tf.reshape(c_dreamed_t0_to_HxB, (-1, B)), + } + + # Figure out action key (random, sampled from env, dreamed?). + if use_sampled_actions_in_dream: + key = "actions_sampled_t0_to_H_BxT" + elif use_random_actions_in_dream: + key = "actions_random_t0_to_H_BxT" + else: + key = "actions_dreamed_t0_to_H_BxT" + ret[key] = a_t0_to_H_B + + # Also provide int-actions, if discrete action space. + if isinstance(self.action_space, gym.spaces.Discrete): + ret[re.sub("^actions_", "actions_ints_", key)] = tf.argmax( + a_t0_to_H_B, axis=-1 + ) + + return ret diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/world_model.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/world_model.py new file mode 100644 index 0000000000000000000000000000000000000000..f3bd20ff4667f07603bd85acf502cc7adbf280e0 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/world_model.py @@ -0,0 +1,407 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. 
Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" +from typing import Optional + +import gymnasium as gym +import tree # pip install dm_tree + +from ray.rllib.algorithms.dreamerv3.tf.models.components.continue_predictor import ( + ContinuePredictor, +) +from ray.rllib.algorithms.dreamerv3.tf.models.components.dynamics_predictor import ( + DynamicsPredictor, +) +from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP +from ray.rllib.algorithms.dreamerv3.tf.models.components.representation_layer import ( + RepresentationLayer, +) +from ray.rllib.algorithms.dreamerv3.tf.models.components.reward_predictor import ( + RewardPredictor, +) +from ray.rllib.algorithms.dreamerv3.tf.models.components.sequence_model import ( + SequenceModel, +) +from ray.rllib.algorithms.dreamerv3.utils import get_gru_units +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.tf_utils import symlog + + +_, tf, _ = try_import_tf() + + +class WorldModel(tf.keras.Model): + """WorldModel component of [1] w/ encoder, decoder, RSSM, reward/cont. predictors. + + See eq. 3 of [1] for all components and their respective in- and outputs. + Note that in the paper, the "encoder" includes both the raw encoder plus the + "posterior net", which produces posterior z-states from observations and h-states. + + Note: The "internal state" of the world model always consists of: + The actions `a` (initially, this is a zeroed-out action), `h`-states (deterministic, + continuous), and `z`-states (stochastic, discrete). + There are two versions of z-states: "posterior" for world model training and "prior" + for creating the dream data. + + Initial internal state values (`a`, `h`, and `z`) are inserted where ever a new + episode starts within a batch row OR at the beginning of each train batch's B rows, + regardless of whether there was an actual episode boundary or not. Thus, internal + states are not required to be stored in or retrieved from the replay buffer AND + retrieved batches from the buffer must not be zero padded. + + Initial `a` is the zero "one hot" action, e.g. [0.0, 0.0] for Discrete(2), initial + `h` is a separate learned variable, and initial `z` are computed by the "dynamics" + (or "prior") net, using only the initial-h state as input. + """ + + def __init__( + self, + *, + model_size: str = "XS", + observation_space: gym.Space, + action_space: gym.Space, + batch_length_T: int = 64, + encoder: tf.keras.Model, + decoder: tf.keras.Model, + num_gru_units: Optional[int] = None, + symlog_obs: bool = True, + ): + """Initializes a WorldModel instance. + + Args: + model_size: The "Model Size" used according to [1] Appendinx B. + Use None for manually setting the different network sizes. + observation_space: The observation space of the environment used. + action_space: The action space of the environment used. + batch_length_T: The length (T) of the sequences used for training. The + actual shape of the input data (e.g. rewards) is then: [B, T, ...], + where B is the "batch size", T is the "batch length" (this arg) and + "..." is the dimension of the data (e.g. (64, 64, 3) for Atari image + observations). Note that a single row (within a batch) may contain data + from different episodes, but an already on-going episode is always + finished, before a new one starts within the same row. + encoder: The encoder Model taking observations as inputs and + outputting a 1D latent vector that will be used as input into the + posterior net (z-posterior state generating layer). 
Inputs are symlogged + if inputs are NOT images. For images, we use normalization between -1.0 + and 1.0 (x / 128 - 1.0) + decoder: The decoder Model taking h- and z-states as inputs and generating + a (possibly symlogged) predicted observation. Note that for images, + the last decoder layer produces the exact, normalized pixel values + (not a Gaussian as described in [1]!). + num_gru_units: The number of GRU units to use. If None, use + `model_size` to figure out this parameter. + symlog_obs: Whether to predict decoded observations in symlog space. + This should be False for image based observations. + According to the paper [1] Appendix E: "NoObsSymlog: This ablation + removes the symlog encoding of inputs to the world model and also + changes the symlog MSE loss in the decoder to a simple MSE loss. + *Because symlog encoding is only used for vector observations*, this + ablation is equivalent to DreamerV3 on purely image-based environments". + """ + super().__init__(name="world_model") + + self.model_size = model_size + self.batch_length_T = batch_length_T + self.symlog_obs = symlog_obs + self.observation_space = observation_space + self.action_space = action_space + self._comp_dtype = ( + tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32 + ) + + # Encoder (latent 1D vector generator) (xt -> lt). + self.encoder = encoder + + # Posterior predictor consisting of an MLP and a RepresentationLayer: + # [ht, lt] -> zt. + self.posterior_mlp = MLP( + model_size=self.model_size, + output_layer_size=None, + # In Danijar's code, the posterior predictor only has a single layer, + # no matter the model size: + num_dense_layers=1, + name="posterior_mlp", + ) + # The (posterior) z-state generating layer. + self.posterior_representation_layer = RepresentationLayer( + model_size=self.model_size, + ) + + # Dynamics (prior z-state) predictor: ht -> z^t + self.dynamics_predictor = DynamicsPredictor(model_size=self.model_size) + + # GRU for the RSSM: [at, ht, zt] -> ht+1 + self.num_gru_units = get_gru_units( + model_size=self.model_size, + override=num_gru_units, + ) + # Initial h-state variable (learnt). + # -> tanh(self.initial_h) -> deterministic state + # Use our Dynamics predictor for initial stochastic state, BUT with greedy + # (mode) instead of sampling. + self.initial_h = tf.Variable( + tf.zeros(shape=(self.num_gru_units,)), + trainable=True, + name="initial_h", + ) + # The actual sequence model containing the GRU layer. + self.sequence_model = SequenceModel( + model_size=self.model_size, + action_space=self.action_space, + num_gru_units=self.num_gru_units, + ) + + # Reward Predictor: [ht, zt] -> rt. + self.reward_predictor = RewardPredictor(model_size=self.model_size) + # Continue Predictor: [ht, zt] -> ct. + self.continue_predictor = ContinuePredictor(model_size=self.model_size) + + # Decoder: [ht, zt] -> x^t. + self.decoder = decoder + + # Trace self.call. + self.forward_train = tf.function( + input_signature=[ + tf.TensorSpec(shape=[None, None] + list(self.observation_space.shape)), + tf.TensorSpec( + shape=[None, None] + + ( + [self.action_space.n] + if isinstance(action_space, gym.spaces.Discrete) + else list(self.action_space.shape) + ) + ), + tf.TensorSpec(shape=[None, None], dtype=tf.bool), + ] + )(self.forward_train) + + @tf.function + def get_initial_state(self): + """Returns the (current) initial state of the world model (h- and z-states). 
+ + An initial state is generated using the tanh of the (learned) h-state variable + and the dynamics predictor (or "prior net") to compute z^0 from h0. In this last + step, it is important that we do NOT sample the z^-state (as we would usually + do during dreaming), but rather take the mode (argmax, then one-hot again). + """ + h = tf.expand_dims(tf.math.tanh(tf.cast(self.initial_h, self._comp_dtype)), 0) + # Use the mode, NOT a sample for the initial z-state. + _, z_probs = self.dynamics_predictor(h) + z = tf.argmax(z_probs, axis=-1) + z = tf.one_hot(z, depth=z_probs.shape[-1], dtype=self._comp_dtype) + + return {"h": h, "z": z} + + def forward_inference(self, observations, previous_states, is_first, training=None): + """Performs a forward step for inference (e.g. environment stepping). + + Works analogous to `forward_train`, except that all inputs are provided + for a single timestep in the shape of [B, ...] (no time dimension!). + + Args: + observations: The batch (B, ...) of observations to be passed through + the encoder network to yield the inputs to the representation layer + (which then can compute the z-states). + previous_states: A dict with `h`, `z`, and `a` keys mapping to the + respective previous states/actions. All of the shape (B, ...), no time + rank. + is_first: The batch (B) of `is_first` flags. + + Returns: + The next deterministic h-state (h(t+1)) as predicted by the sequence model. + """ + observations = tf.cast(observations, self._comp_dtype) + + initial_states = tree.map_structure( + lambda s: tf.repeat(s, tf.shape(observations)[0], axis=0), + self.get_initial_state(), + ) + + # If first, mask it with initial state/actions. + previous_h = self._mask(previous_states["h"], 1.0 - is_first) # zero out + previous_h = previous_h + self._mask(initial_states["h"], is_first) # add init + + previous_z = self._mask(previous_states["z"], 1.0 - is_first) # zero out + previous_z = previous_z + self._mask(initial_states["z"], is_first) # add init + + # Zero out actions (no special learnt initial state). + previous_a = self._mask(previous_states["a"], 1.0 - is_first) + + # Compute new states. + h = self.sequence_model(a=previous_a, h=previous_h, z=previous_z) + z = self.compute_posterior_z(observations=observations, initial_h=h) + + return {"h": h, "z": z} + + def forward_train(self, observations, actions, is_first): + """Performs a forward step for training. + + 1) Forwards all observations [B, T, ...] through the encoder network to yield + o_processed[B, T, ...]. + 2) Uses initial state (h0/z^0/a0[B, 0, ...]) and sequence model (RSSM) to + compute the first internal state (h1 and z^1). + 3) Uses action a[B, 1, ...], z[B, 1, ...] and h[B, 1, ...] to compute the + next h-state (h[B, 2, ...]), etc.. + 4) Repeats 2) and 3) until t=T. + 5) Uses all h[B, T, ...] and z[B, T, ...] to compute predicted/reconstructed + observations, rewards, and continue signals. + 6) Returns predictions from 5) along with all z-states z[B, T, ...] and + the final h-state (h[B, ...] for t=T). + + Should we encounter is_first=True flags in the middle of a batch row (somewhere + within an ongoing sequence of length T), we insert this world model's initial + state again (zero-action, learned init h-state, and prior-computed z^) and + simply continue (no zero-padding). + + Args: + observations: The batch (B, T, ...) of observations to be passed through + the encoder network to yield the inputs to the representation layer + (which then can compute the posterior z-states). + actions: The batch (B, T, ...) 
of actions to be used in combination with + h-states and computed z-states to yield the next h-states. + is_first: The batch (B, T) of `is_first` flags. + """ + if self.symlog_obs: + observations = symlog(observations) + + # Compute bare encoder outs (not z; this is done later with involvement of the + # sequence model and the h-states). + # Fold time dimension for CNN pass. + shape = tf.shape(observations) + B, T = shape[0], shape[1] + observations = tf.reshape( + observations, shape=tf.concat([[-1], shape[2:]], axis=0) + ) + + encoder_out = self.encoder(tf.cast(observations, self._comp_dtype)) + # Unfold time dimension. + encoder_out = tf.reshape( + encoder_out, shape=tf.concat([[B, T], tf.shape(encoder_out)[1:]], axis=0) + ) + # Make time major for faster upcoming loop. + encoder_out = tf.transpose( + encoder_out, perm=[1, 0] + list(range(2, len(encoder_out.shape.as_list()))) + ) + # encoder_out=[T, B, ...] + + initial_states = tree.map_structure( + lambda s: tf.repeat(s, B, axis=0), self.get_initial_state() + ) + + # Make actions and `is_first` time-major. + actions = tf.transpose( + tf.cast(actions, self._comp_dtype), + perm=[1, 0] + list(range(2, tf.shape(actions).shape.as_list()[0])), + ) + is_first = tf.transpose(tf.cast(is_first, self._comp_dtype), perm=[1, 0]) + + # Loop through the T-axis of our samples and perform one computation step at + # a time. This is necessary because the sequence model's output (h(t+1)) depends + # on the current z(t), but z(t) depends on the current sequence model's output + # h(t). + z_t0_to_T = [initial_states["z"]] + z_posterior_probs = [] + z_prior_probs = [] + h_t0_to_T = [initial_states["h"]] + for t in range(self.batch_length_T): + # If first, mask it with initial state/actions. + h_tm1 = self._mask(h_t0_to_T[-1], 1.0 - is_first[t]) # zero out + h_tm1 = h_tm1 + self._mask(initial_states["h"], is_first[t]) # add init + + z_tm1 = self._mask(z_t0_to_T[-1], 1.0 - is_first[t]) # zero out + z_tm1 = z_tm1 + self._mask(initial_states["z"], is_first[t]) # add init + + # Zero out actions (no special learnt initial state). + a_tm1 = self._mask(actions[t - 1], 1.0 - is_first[t]) + + # Perform one RSSM (sequence model) step to get the current h. + h_t = self.sequence_model(a=a_tm1, h=h_tm1, z=z_tm1) + h_t0_to_T.append(h_t) + + posterior_mlp_input = tf.concat([encoder_out[t], h_t], axis=-1) + repr_input = self.posterior_mlp(posterior_mlp_input) + # Draw one z-sample (z(t)) and also get the z-distribution for dynamics and + # representation loss computations. + z_t, z_probs = self.posterior_representation_layer(repr_input) + # z_t=[B, num_categoricals, num_classes] + z_posterior_probs.append(z_probs) + z_t0_to_T.append(z_t) + + # Compute the predicted z_t (z^) using the dynamics model. + _, z_probs = self.dynamics_predictor(h_t) + z_prior_probs.append(z_probs) + + # Stack at time dimension to yield: [B, T, ...]. + h_t1_to_T = tf.stack(h_t0_to_T[1:], axis=1) + z_t1_to_T = tf.stack(z_t0_to_T[1:], axis=1) + + # Fold time axis to retrieve the final (loss ready) Independent distribution + # (over `num_categoricals` Categoricals). + z_posterior_probs = tf.stack(z_posterior_probs, axis=1) + z_posterior_probs = tf.reshape( + z_posterior_probs, + shape=[-1] + z_posterior_probs.shape.as_list()[2:], + ) + # Fold time axis to retrieve the final (loss ready) Independent distribution + # (over `num_categoricals` Categoricals). 
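+        # (These prior probs are later compared against the posterior probs in the
+        # world model's dynamics- and representation (KL) losses.)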
+ z_prior_probs = tf.stack(z_prior_probs, axis=1) + z_prior_probs = tf.reshape( + z_prior_probs, + shape=[-1] + z_prior_probs.shape.as_list()[2:], + ) + + # Fold time dimension for parallelization of all dependent predictions: + # observations (reproduction via decoder), rewards, continues. + h_BxT = tf.reshape(h_t1_to_T, shape=[-1] + h_t1_to_T.shape.as_list()[2:]) + z_BxT = tf.reshape(z_t1_to_T, shape=[-1] + z_t1_to_T.shape.as_list()[2:]) + + obs_distribution_means = tf.cast(self.decoder(h=h_BxT, z=z_BxT), tf.float32) + + # Compute (predicted) reward distributions. + rewards, reward_logits = self.reward_predictor(h=h_BxT, z=z_BxT) + + # Compute (predicted) continue distributions. + continues, continue_distribution = self.continue_predictor(h=h_BxT, z=z_BxT) + + # Return outputs for loss computation. + # Note that all shapes are [BxT, ...] (time axis already folded). + return { + # Obs. + "sampled_obs_symlog_BxT": observations, + "obs_distribution_means_BxT": obs_distribution_means, + # Rewards. + "reward_logits_BxT": reward_logits, + "rewards_BxT": rewards, + # Continues. + "continue_distribution_BxT": continue_distribution, + "continues_BxT": continues, + # Deterministic, continuous h-states (t1 to T). + "h_states_BxT": h_BxT, + # Sampled, discrete posterior z-states and their probs (t1 to T). + "z_posterior_states_BxT": z_BxT, + "z_posterior_probs_BxT": z_posterior_probs, + # Probs of the prior z-states (t1 to T). + "z_prior_probs_BxT": z_prior_probs, + } + + def compute_posterior_z(self, observations, initial_h): + # Compute bare encoder outputs (not including z, which is computed in next step + # with involvement of the previous output (initial_h) of the sequence model). + # encoder_outs=[B, ...] + if self.symlog_obs: + observations = symlog(observations) + encoder_out = self.encoder(observations) + # Concat encoder outs with the h-states. + posterior_mlp_input = tf.concat([encoder_out, initial_h], axis=-1) + # Compute z. + repr_input = self.posterior_mlp(posterior_mlp_input) + # Draw a z-sample. + z_t, _ = self.posterior_representation_layer(repr_input) + return z_t + + @staticmethod + def _mask(value, mask): + return tf.einsum("b...,b->b...", value, tf.cast(mask, value.dtype)) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe7b58cf515ee0c01b83dac8b515a07489c11c91 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__init__.py @@ -0,0 +1,168 @@ +""" +Utility functions for the DreamerV3 ([1]) algorithm. + +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf +""" + +_ALLOWED_MODEL_DIMS = [ + # RLlib debug sizes (not mentioned in [1]). + "nano", + "micro", + "mini", + "XXS", + # Regular sizes (listed in table B in [1]). 
+ "XS", + "S", + "M", + "L", + "XL", +] + + +def get_cnn_multiplier(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + cnn_multipliers = { + "nano": 2, + "micro": 4, + "mini": 8, + "XXS": 16, + "XS": 24, + "S": 32, + "M": 48, + "L": 64, + "XL": 96, + } + return cnn_multipliers[model_size] + + +def get_dense_hidden_units(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + dense_units = { + "nano": 16, + "micro": 32, + "mini": 64, + "XXS": 128, + "XS": 256, + "S": 512, + "M": 640, + "L": 768, + "XL": 1024, + } + return dense_units[model_size] + + +def get_gru_units(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + gru_units = { + "nano": 16, + "micro": 32, + "mini": 64, + "XXS": 128, + "XS": 256, + "S": 512, + "M": 1024, + "L": 2048, + "XL": 4096, + } + return gru_units[model_size] + + +def get_num_z_categoricals(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + gru_units = { + "nano": 4, + "micro": 8, + "mini": 16, + "XXS": 32, + "XS": 32, + "S": 32, + "M": 32, + "L": 32, + "XL": 32, + } + return gru_units[model_size] + + +def get_num_z_classes(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + gru_units = { + "nano": 4, + "micro": 8, + "mini": 16, + "XXS": 32, + "XS": 32, + "S": 32, + "M": 32, + "L": 32, + "XL": 32, + } + return gru_units[model_size] + + +def get_num_curiosity_nets(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + num_curiosity_nets = { + "nano": 8, + "micro": 8, + "mini": 8, + "XXS": 8, + "XS": 8, + "S": 8, + "M": 8, + "L": 8, + "XL": 8, + } + return num_curiosity_nets[model_size] + + +def get_num_dense_layers(model_size, override=None): + if override is not None: + return override + + assert model_size in _ALLOWED_MODEL_DIMS + num_dense_layers = { + "nano": 1, + "micro": 1, + "mini": 1, + "XXS": 1, + "XS": 1, + "S": 2, + "M": 3, + "L": 4, + "XL": 5, + } + return num_dense_layers[model_size] + + +def do_symlog_obs(observation_space, symlog_obs_user_setting): + # If our symlog_obs setting is NOT set specifically (it's set to "auto"), return + # True if we don't have an image observation space, otherwise return False. + + # TODO (sven): Support mixed observation spaces. 
+ + is_image_space = len(observation_space.shape) in [2, 3] + return ( + not is_image_space + if symlog_obs_user_setting == "auto" + else symlog_obs_user_setting + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__pycache__/debugging.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__pycache__/debugging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef4686eb488d253f470b3f4732154ba66352426e Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__pycache__/debugging.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__pycache__/env_runner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__pycache__/env_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d935ad3b7b649ea527c3c9de08ed8a718eb79a4a Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/__pycache__/env_runner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/debugging.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/debugging.py new file mode 100644 index 0000000000000000000000000000000000000000..7ddbd8341ddb883a8be02ec7db90733141800a89 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/debugging.py @@ -0,0 +1,190 @@ +import gymnasium as gym +import numpy as np +from PIL import Image, ImageDraw + +from gymnasium.envs.classic_control.cartpole import CartPoleEnv + +from ray.rllib.utils.framework import try_import_tf + +_, tf, _ = try_import_tf() + + +class CartPoleDebug(CartPoleEnv): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + low = np.concatenate([np.array([0.0]), self.observation_space.low]) + high = np.concatenate([np.array([1000.0]), self.observation_space.high]) + + self.observation_space = gym.spaces.Box(low, high, shape=(5,), dtype=np.float32) + + self.timesteps_ = 0 + self._next_action = 0 + self._seed = 1 + + def reset(self, *, seed=None, options=None): + ret = super().reset(seed=self._seed) + self._seed += 1 + self.timesteps_ = 0 + self._next_action = 0 + obs = np.concatenate([np.array([self.timesteps_]), ret[0]]) + return obs, ret[1] + + def step(self, action): + ret = super().step(self._next_action) + + self.timesteps_ += 1 + self._next_action = 0 if self._next_action else 1 + + obs = np.concatenate([np.array([self.timesteps_]), ret[0]]) + reward = 0.1 * self.timesteps_ + return (obs, reward) + ret[2:] + + +gym.register("CartPoleDebug-v0", CartPoleDebug) +cartpole_env = gym.make("CartPoleDebug-v0", render_mode="rgb_array") +cartpole_env.reset() + +frozenlake_env = gym.make( + "FrozenLake-v1", render_mode="rgb_array", is_slippery=False, map_name="4x4" +) # desc=["SF", "HG"]) +frozenlake_env.reset() + + +def create_cartpole_dream_image( + dreamed_obs, # real space (not symlog'd) + dreamed_V, # real space (not symlog'd) + dreamed_a, + dreamed_r_tp1, # real space (not symlog'd) + dreamed_ri_tp1, # intrinsic reward + dreamed_c_tp1, # continue flag + value_target, # real space (not symlog'd) + initial_h, + as_tensor=False, +): + # CartPoleDebug + if dreamed_obs.shape == (5,): + # Set the state of our env to the given observation. 
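+        # (For CartPoleDebug, index 0 of the observation is the timestep counter
+        # and is therefore skipped when restoring the env state.)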
+ cartpole_env.unwrapped.state = np.array(dreamed_obs[1:], dtype=np.float32) + # Normal CartPole-v1 + else: + cartpole_env.unwrapped.state = np.array(dreamed_obs, dtype=np.float32) + + # Produce an RGB-image of the current state. + rgb_array = cartpole_env.render() + + # Add value-, action-, reward-, and continue-prediction information. + image = Image.fromarray(rgb_array) + draw_obj = ImageDraw.Draw(image) + + # fnt = ImageFont.load_default(size=40) + + draw_obj.text( + (5, 6), f"Vt={dreamed_V:.2f} (Rt={value_target:.2f})", fill=(0, 0, 0) + ) # , font=fnt.font, size=30) + draw_obj.text( + (5, 18), + f"at={'<--' if dreamed_a == 0 else '-->'} ({dreamed_a})", + fill=(0, 0, 0), + ) + draw_obj.text((5, 30), f"rt+1={dreamed_r_tp1:.2f}", fill=(0, 0, 0)) + if dreamed_ri_tp1 is not None: + draw_obj.text((5, 42), f"rit+1={dreamed_ri_tp1:.6f}", fill=(0, 0, 0)) + draw_obj.text((5, 54), f"ct+1={dreamed_c_tp1}", fill=(0, 0, 0)) + draw_obj.text((5, 66), f"|h|t={np.mean(np.abs(initial_h)):.5f}", fill=(0, 0, 0)) + + if dreamed_obs.shape == (5,): + draw_obj.text((20, 100), f"t={dreamed_obs[0]}", fill=(0, 0, 0)) + + # Return image. + np_img = np.asarray(image) + if as_tensor: + return tf.convert_to_tensor(np_img, dtype=tf.uint8) + return np_img + + +def create_frozenlake_dream_image( + dreamed_obs, # real space (not symlog'd) + dreamed_V, # real space (not symlog'd) + dreamed_a, + dreamed_r_tp1, # real space (not symlog'd) + dreamed_ri_tp1, # intrinsic reward + dreamed_c_tp1, # continue flag + value_target, # real space (not symlog'd) + initial_h, + as_tensor=False, +): + frozenlake_env.unwrapped.s = np.argmax(dreamed_obs, axis=0) + + # Produce an RGB-image of the current state. + rgb_array = frozenlake_env.render() + + # Add value-, action-, reward-, and continue-prediction information. + image = Image.fromarray(rgb_array) + draw_obj = ImageDraw.Draw(image) + + draw_obj.text((5, 6), f"Vt={dreamed_V:.2f} (Rt={value_target:.2f})", fill=(0, 0, 0)) + action_arrow = ( + "<--" + if dreamed_a == 0 + else "v" + if dreamed_a == 1 + else "-->" + if dreamed_a == 2 + else "^" + ) + draw_obj.text((5, 18), f"at={action_arrow} ({dreamed_a})", fill=(0, 0, 0)) + draw_obj.text((5, 30), f"rt+1={dreamed_r_tp1:.2f}", fill=(0, 0, 0)) + if dreamed_ri_tp1 is not None: + draw_obj.text((5, 42), f"rit+1={dreamed_ri_tp1:.6f}", fill=(0, 0, 0)) + draw_obj.text((5, 54), f"ct+1={dreamed_c_tp1}", fill=(0, 0, 0)) + draw_obj.text((5, 66), f"|h|t={np.mean(np.abs(initial_h)):.5f}", fill=(0, 0, 0)) + + # Return image. + np_img = np.asarray(image) + if as_tensor: + return tf.convert_to_tensor(np_img, dtype=tf.uint8) + return np_img + + +if __name__ == "__main__": + # CartPole debug. + rgb_array = create_cartpole_dream_image( + dreamed_obs=np.array([100.0, 1.0, -0.01, 1.5, 0.02]), + dreamed_V=4.3, + dreamed_a=1, + dreamed_r_tp1=1.0, + dreamed_c_tp1=True, + initial_h=0.0, + value_target=8.0, + ) + # ImageFont.load("arial.pil") + image = Image.fromarray(rgb_array) + image.show() + + # Normal CartPole. 
+ rgb_array = create_cartpole_dream_image( + dreamed_obs=np.array([1.0, -0.01, 1.5, 0.02]), + dreamed_V=4.3, + dreamed_a=1, + dreamed_r_tp1=1.0, + dreamed_c_tp1=True, + initial_h=0.1, + value_target=8.0, + ) + # ImageFont.load("arial.pil") + image = Image.fromarray(rgb_array) + image.show() + + # Frozenlake + rgb_array = create_frozenlake_dream_image( + dreamed_obs=np.array([1.0] + [0.0] * (frozenlake_env.observation_space.n - 1)), + dreamed_V=4.3, + dreamed_a=1, + dreamed_r_tp1=1.0, + dreamed_c_tp1=True, + initial_h=0.1, + value_target=8.0, + ) + image = Image.fromarray(rgb_array) + image.show() diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/summaries.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/summaries.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b0ea753d4d2c078a2bfd14d80590b5161e2469 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/utils/summaries.py @@ -0,0 +1,408 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf + +[2] Mastering Atari with Discrete World Models - 2021 +D. Hafner, T. Lillicrap, M. Norouzi, J. Ba +https://arxiv.org/pdf/2010.02193.pdf +""" +import numpy as np + +from ray.rllib.algorithms.dreamerv3.utils.debugging import ( + create_cartpole_dream_image, + create_frozenlake_dream_image, +) +from ray.rllib.core import DEFAULT_MODULE_ID +from ray.rllib.core.columns import Columns +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.metrics import ( + LEARNER_RESULTS, + REPLAY_BUFFER_RESULTS, +) +from ray.rllib.utils.tf_utils import inverse_symlog + +torch, _ = try_import_torch() + + +def reconstruct_obs_from_h_and_z( + h_t0_to_H, + z_t0_to_H, + dreamer_model, + obs_dims_shape, + framework="torch", +): + """Returns""" + shape = h_t0_to_H.shape + T = shape[0] # inputs are time-major + B = shape[1] + # Compute actual observations using h and z and the decoder net. + # Note that the last h-state (T+1) is NOT used here as it's already part of + # a new trajectory. + # Use mean() of the Gaussian, no sample! -> No need to construct dist object here. + if framework == "torch": + device = next(iter(dreamer_model.world_model.decoder.parameters())).device + reconstructed_obs_distr_means_TxB = ( + dreamer_model.world_model.decoder( + # Fold time rank. + h=torch.from_numpy(h_t0_to_H).reshape((T * B, -1)).to(device), + z=torch.from_numpy(z_t0_to_H) + .reshape((T * B,) + z_t0_to_H.shape[2:]) + .to(device), + ) + .detach() + .cpu() + .numpy() + ) + else: + reconstructed_obs_distr_means_TxB = dreamer_model.world_model.decoder( + # Fold time rank. + h=h_t0_to_H.reshape((T * B, -1)), + z=z_t0_to_H.reshape((T * B,) + z_t0_to_H.shape[2:]), + ) + + # Unfold time rank again. + reconstructed_obs_T_B = np.reshape( + reconstructed_obs_distr_means_TxB, (T, B) + obs_dims_shape + ) + # Return inverse symlog'd (real env obs space) reconstructed observations. 
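+    # (Note: The inverse-symlog transform itself is applied by the caller, see
+    # `_report_obs` below, which checks the `symlog_obs` flag; here we return the
+    # raw decoder means, reshaped back to (T, B, ...).)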
+ return reconstructed_obs_T_B + + +def report_dreamed_trajectory( + *, + results, + env, + dreamer_model, + obs_dims_shape, + batch_indices=(0,), + desc=None, + include_images=True, + framework="torch", +): + if not include_images: + return + + dream_data = results["dream_data"] + dreamed_obs_H_B = reconstruct_obs_from_h_and_z( + h_t0_to_H=dream_data["h_states_t0_to_H_BxT"], + z_t0_to_H=dream_data["z_states_prior_t0_to_H_BxT"], + dreamer_model=dreamer_model, + obs_dims_shape=obs_dims_shape, + framework=framework, + ) + func = ( + create_cartpole_dream_image + if env.startswith("CartPole") + else create_frozenlake_dream_image + ) + # Take 0th dreamed trajectory and produce series of images. + for b in batch_indices: + images = [] + for t in range(len(dreamed_obs_H_B) - 1): + images.append( + func( + dreamed_obs=dreamed_obs_H_B[t][b], + dreamed_V=dream_data["values_dreamed_t0_to_H_BxT"][t][b], + dreamed_a=(dream_data["actions_ints_dreamed_t0_to_H_BxT"][t][b]), + dreamed_r_tp1=(dream_data["rewards_dreamed_t0_to_H_BxT"][t + 1][b]), + # `DISAGREE_intrinsic_rewards_H_B` are shifted by 1 already + # (from t1 to H, not t0 to H like all other data here). + dreamed_ri_tp1=( + results["DISAGREE_intrinsic_rewards_H_BxT"][t][b] + if "DISAGREE_intrinsic_rewards_H_BxT" in results + else None + ), + dreamed_c_tp1=( + dream_data["continues_dreamed_t0_to_H_BxT"][t + 1][b] + ), + value_target=results["VALUE_TARGETS_H_BxT"][t][b], + initial_h=dream_data["h_states_t0_to_H_BxT"][t][b], + as_tensor=True, + ).numpy() + ) + # Concat images along width-axis (so they show as a "film sequence" next to each + # other). + results.update( + { + f"dreamed_trajectories{('_'+desc) if desc else ''}_B{b}": ( + np.concatenate(images, axis=1) + ), + } + ) + + +def report_predicted_vs_sampled_obs( + *, + metrics, + sample, + batch_size_B, + batch_length_T, + symlog_obs: bool = True, + do_report: bool = True, +): + """Summarizes sampled data (from the replay buffer) vs world-model predictions. + + World model predictions are based on the posterior states (z computed from actual + observation encoder input + the current h-states). + + Observations: Computes MSE (sampled vs predicted/recreated) over all features. + For image observations, also creates direct image comparisons (sampled images + vs predicted (posterior) ones). + Rewards: Compute MSE (sampled vs predicted). + Continues: Compute MSE (sampled vs predicted). + + Args: + metrics: The MetricsLogger object of the DreamerV3 algo. + sample: The sampled data (dict) from the replay buffer. Already tf-tensor + converted. + batch_size_B: The batch size (B). This is the number of trajectories sampled + from the buffer. + batch_length_T: The batch length (T). This is the length of an individual + trajectory sampled from the buffer. + do_report: Whether to actually log the report (default). If this is set to + False, this function serves as a clean-up on the given metrics, making sure + they do NOT contain anymore any (spacious) data relevant for producing + the report/videos. 
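+        symlog_obs: Whether the world model predicts observations in symlog space,
+            in which case the predicted observations are inverse-symlog'd before
+            the comparison images are created.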
+ """ + fwd_output_key = ( + LEARNER_RESULTS, + DEFAULT_MODULE_ID, + "WORLD_MODEL_fwd_out_obs_distribution_means_b0xT", + ) + # logged as a non-reduced item (still a list) + predicted_observation_means_single_example = metrics.peek( + fwd_output_key, default=[None] + )[-1] + metrics.delete(fwd_output_key, key_error=False) + + final_result_key = ( + f"WORLD_MODEL_sampled_vs_predicted_posterior_b0x{batch_length_T}_videos" + ) + if not do_report: + metrics.delete(final_result_key, key_error=False) + return + + _report_obs( + metrics=metrics, + computed_float_obs_B_T_dims=np.reshape( + predicted_observation_means_single_example, + # WandB videos need to be channels first. + (1, batch_length_T) + sample[Columns.OBS].shape[2:], + ), + sampled_obs_B_T_dims=sample[Columns.OBS][0:1], + metrics_key=final_result_key, + symlog_obs=symlog_obs, + ) + + +def report_dreamed_eval_trajectory_vs_samples( + *, + metrics, + sample, + burn_in_T, + dreamed_T, + dreamer_model, + symlog_obs: bool = True, + do_report: bool = True, + framework="torch", +) -> None: + """Logs dreamed observations, rewards, continues and compares them vs sampled data. + + For obs, we'll try to create videos (side-by-side comparison) of the dreamed, + recreated-from-prior obs vs the sampled ones (over dreamed_T timesteps). + + Args: + metrics: The MetricsLogger object of the DreamerV3 algo. + sample: The sampled data (dict) from the replay buffer. Already tf-tensor + converted. + burn_in_T: The number of burn-in timesteps (these will be skipped over in the + reported video comparisons and MSEs). + dreamed_T: The number of timesteps to produce dreamed data for. + dreamer_model: The DreamerModel to use to create observation vectors/images + from dreamed h- and (prior) z-states. + symlog_obs: Whether to inverse-symlog the computed observations or not. Set this + to True for environments, in which we should symlog the observations. + do_report: Whether to actually log the report (default). If this is set to + False, this function serves as a clean-up on the given metrics, making sure + they do NOT contain anymore any (spacious) data relevant for producing + the report/videos. + """ + dream_data = metrics.peek( + (LEARNER_RESULTS, DEFAULT_MODULE_ID, "dream_data"), + default={}, + ) + metrics.delete(LEARNER_RESULTS, DEFAULT_MODULE_ID, "dream_data", key_error=False) + + final_result_key_obs = f"EVALUATION_sampled_vs_dreamed_prior_H{dreamed_T}_obs" + final_result_key_rew = ( + f"EVALUATION_sampled_vs_dreamed_prior_H{dreamed_T}_rewards_MSE" + ) + final_result_key_cont = ( + f"EVALUATION_sampled_vs_dreamed_prior_H{dreamed_T}_continues_MSE" + ) + if not do_report: + metrics.delete(final_result_key_obs, key_error=False) + metrics.delete(final_result_key_rew, key_error=False) + metrics.delete(final_result_key_cont, key_error=False) + return + + # Obs MSE. + dreamed_obs_H_B = reconstruct_obs_from_h_and_z( + h_t0_to_H=dream_data["h_states_t0_to_H_Bx1"][0], # [0] b/c reduce=None (list) + z_t0_to_H=dream_data["z_states_prior_t0_to_H_Bx1"][0], + dreamer_model=dreamer_model, + obs_dims_shape=sample[Columns.OBS].shape[2:], + framework=framework, + ) + t0 = burn_in_T + tH = t0 + dreamed_T + # Observation MSE and - if applicable - images comparisons. + _report_obs( + metrics=metrics, + # WandB videos need to be 5D (B, L, c, h, w) -> transpose/swap H and B axes. 
+ computed_float_obs_B_T_dims=np.swapaxes(dreamed_obs_H_B, 0, 1)[ + 0:1 + ], # for now: only B=1 + sampled_obs_B_T_dims=sample[Columns.OBS][0:1, t0:tH], + metrics_key=final_result_key_obs, + symlog_obs=symlog_obs, + ) + + # Reward MSE. + _report_rewards( + metrics=metrics, + computed_rewards=dream_data["rewards_dreamed_t0_to_H_Bx1"][0], + sampled_rewards=sample[Columns.REWARDS][:, t0:tH], + metrics_key=final_result_key_rew, + ) + + # Continues MSE. + _report_continues( + metrics=metrics, + computed_continues=dream_data["continues_dreamed_t0_to_H_Bx1"][0], + sampled_continues=(1.0 - sample["is_terminated"])[:, t0:tH], + metrics_key=final_result_key_cont, + ) + + +def report_sampling_and_replay_buffer(*, metrics, replay_buffer): + episodes_in_buffer = replay_buffer.get_num_episodes() + ts_in_buffer = replay_buffer.get_num_timesteps() + replayed_steps = replay_buffer.get_sampled_timesteps() + added_steps = replay_buffer.get_added_timesteps() + + # Summarize buffer, sampling, and train ratio stats. + metrics.log_dict( + { + "capacity": replay_buffer.capacity, + "size_num_episodes": episodes_in_buffer, + "size_timesteps": ts_in_buffer, + "replayed_steps": replayed_steps, + "added_steps": added_steps, + }, + key=REPLAY_BUFFER_RESULTS, + window=1, + ) # window=1 b/c these are current (total count/state) values. + + +def _report_obs( + *, + metrics, + computed_float_obs_B_T_dims, + sampled_obs_B_T_dims, + metrics_key, + symlog_obs, +): + """Summarizes computed- vs sampled observations: MSE and (if applicable) images. + + Args: + metrics: The MetricsLogger object of the DreamerV3 algo. + computed_float_obs_B_T_dims: Computed float observations + (not clipped, not cast'd). Shape=(B, T, [dims ...]). + sampled_obs_B_T_dims: Sampled observations (as-is from the environment, meaning + this could be uint8, 0-255 clipped images). Shape=(B, T, [dims ...]). + metrics_key: The metrics key (or key sequence) under which to log ths resulting + video sequence. + symlog_obs: Whether to inverse-symlog the computed observations or not. Set this + to True for environments, in which we should symlog the observations. + + """ + # Videos: Create summary, comparing computed images with actual sampled ones. + # 4=[B, T, w, h] grayscale image; 5=[B, T, w, h, C] RGB image. + if len(sampled_obs_B_T_dims.shape) in [4, 5]: + # WandB videos need to be channels first. + transpose_axes = ( + (0, 1, 4, 2, 3) if len(sampled_obs_B_T_dims.shape) == 5 else (0, 3, 1, 2) + ) + + if symlog_obs: + computed_float_obs_B_T_dims = inverse_symlog(computed_float_obs_B_T_dims) + + # Restore image pixels from normalized (non-symlog'd) data. + if not symlog_obs: + computed_float_obs_B_T_dims = (computed_float_obs_B_T_dims + 1.0) * 128 + sampled_obs_B_T_dims = (sampled_obs_B_T_dims + 1.0) * 128 + sampled_obs_B_T_dims = np.clip(sampled_obs_B_T_dims, 0.0, 255.0).astype( + np.uint8 + ) + sampled_obs_B_T_dims = np.transpose(sampled_obs_B_T_dims, transpose_axes) + computed_images = np.clip(computed_float_obs_B_T_dims, 0.0, 255.0).astype( + np.uint8 + ) + computed_images = np.transpose(computed_images, transpose_axes) + # Concat sampled and computed images along the height axis (3) such that + # real images show below respective predicted ones. + # (B, T, C, h, w) + sampled_vs_computed_images = np.concatenate( + [computed_images, sampled_obs_B_T_dims], + axis=-1, # concat on width axis (looks nicer) + ) + # Add grayscale dim, if necessary. 
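+        # ("2 + 2" = 2 batch/time axes + 2 spatial axes, i.e. the data has no
+        # channel axis yet; `expand_dims` then appends one so the logged video is
+        # always 5D.)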
+ if len(sampled_obs_B_T_dims.shape) == 2 + 2: + sampled_vs_computed_images = np.expand_dims(sampled_vs_computed_images, -1) + + metrics.log_value( + metrics_key, + sampled_vs_computed_images, + reduce=None, # No reduction, we want the obs tensor to stay in-tact. + window=1, + ) + + +def _report_rewards( + *, + metrics, + computed_rewards, + sampled_rewards, + metrics_key, +): + mse_sampled_vs_computed_rewards = np.mean( + np.square(computed_rewards - sampled_rewards) + ) + mse_sampled_vs_computed_rewards = np.mean(mse_sampled_vs_computed_rewards) + metrics.log_value( + metrics_key, + mse_sampled_vs_computed_rewards, + window=1, + ) + + +def _report_continues( + *, + metrics, + computed_continues, + sampled_continues, + metrics_key, +): + # Continue MSE. + mse_sampled_vs_computed_continues = np.mean( + np.square( + computed_continues - sampled_continues.astype(computed_continues.dtype) + ) + ) + metrics.log_value( + metrics_key, + mse_sampled_vs_computed_continues, + window=1, + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/impala/impala_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/impala/impala_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..1929f9f010d61fb63fb62e72e7b1ee6408014c63 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/impala/impala_learner.py @@ -0,0 +1,367 @@ +from collections import deque +import copy +import queue +import threading +import time +from typing import Any, Dict, List, Union + +import tree # pip install dm_tree + +import ray +from ray.rllib.algorithms.appo.utils import CircularBuffer +from ray.rllib.algorithms.impala.impala import LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY +from ray.rllib.core.columns import Columns +from ray.rllib.core.learner.learner import Learner +from ray.rllib.connectors.common import NumpyToTensor +from ray.rllib.connectors.learner import AddOneTsToEpisodesAndTruncate +from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch +from ray.rllib.utils.annotations import ( + override, + OverrideToImplementCustomLogic_CallToSuperRecommended, +) +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.lambda_defaultdict import LambdaDefaultDict +from ray.rllib.utils.metrics import ( + ALL_MODULES, + NUM_ENV_STEPS_SAMPLED_LIFETIME, + NUM_ENV_STEPS_TRAINED, +) +from ray.rllib.utils.metrics.metrics_logger import MetricsLogger +from ray.rllib.utils.schedules.scheduler import Scheduler +from ray.rllib.utils.typing import EpisodeType, ModuleID, ResultDict + +torch, _ = try_import_torch() + +GPU_LOADER_QUEUE_WAIT_TIMER = "gpu_loader_queue_wait_timer" +GPU_LOADER_LOAD_TO_GPU_TIMER = "gpu_loader_load_to_gpu_timer" +LEARNER_THREAD_IN_QUEUE_WAIT_TIMER = "learner_thread_in_queue_wait_timer" +LEARNER_THREAD_ENV_STEPS_DROPPED = "learner_thread_env_steps_dropped" +LEARNER_THREAD_UPDATE_TIMER = "learner_thread_update_timer" +RAY_GET_EPISODES_TIMER = "ray_get_episodes_timer" +EPISODES_TO_BATCH_TIMER = "episodes_to_batch_timer" + +QUEUE_SIZE_GPU_LOADER_QUEUE = "queue_size_gpu_loader_queue" +QUEUE_SIZE_LEARNER_THREAD_QUEUE = "queue_size_learner_thread_queue" +QUEUE_SIZE_RESULTS_QUEUE = "queue_size_results_queue" + + +class IMPALALearner(Learner): + @override(Learner) + def build(self) -> None: + super().build() + + # Dict mapping module IDs to the respective entropy Scheduler instance. 
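+        # Each Scheduler is created lazily (via `LambdaDefaultDict`) the first time
+        # its module ID is looked up. Illustrative example (values not from this
+        # file): an `entropy_coeff` given as a schedule such as
+        # [[0, 0.01], [2_000_000, 0.0]] would be annealed from 0.01 to 0.0 over the
+        # first 2M sampled env steps, while a plain float stays constant.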
+ self.entropy_coeff_schedulers_per_module: Dict[ + ModuleID, Scheduler + ] = LambdaDefaultDict( + lambda module_id: Scheduler( + fixed_value_or_schedule=( + self.config.get_config_for_module(module_id).entropy_coeff + ), + framework=self.framework, + device=self._device, + ) + ) + + # Extend all episodes by one artificial timestep to allow the value function net + # to compute the bootstrap values (and add a mask to the batch to know, which + # slots to mask out). + if ( + self._learner_connector is not None + and self.config.add_default_connectors_to_learner_pipeline + ): + self._learner_connector.prepend(AddOneTsToEpisodesAndTruncate()) + # Leave all batches on the CPU (they'll be moved to the GPU, if applicable, + # by the n GPU loader threads). + numpy_to_tensor_connector = self._learner_connector[NumpyToTensor][0] + numpy_to_tensor_connector._device = "cpu" # TODO (sven): Provide API? + + # Create and start the GPU-loader thread. It picks up train-ready batches from + # the "GPU-loader queue" and loads them to the GPU, then places the GPU batches + # on the "update queue" for the actual RLModule forward pass and loss + # computations. + self._gpu_loader_in_queue = queue.Queue() + # Default is to have a learner thread. + if not hasattr(self, "_learner_thread_in_queue"): + self._learner_thread_in_queue = deque(maxlen=self.config.learner_queue_size) + self._learner_thread_out_queue = queue.Queue() + + # Create and start the GPU loader thread(s). + if self.config.num_gpus_per_learner > 0: + self._gpu_loader_threads = [ + _GPULoaderThread( + in_queue=self._gpu_loader_in_queue, + out_queue=self._learner_thread_in_queue, + device=self._device, + metrics_logger=self.metrics, + ) + for _ in range(self.config.num_gpu_loader_threads) + ] + for t in self._gpu_loader_threads: + t.start() + + # Create and start the Learner thread. + self._learner_thread = _LearnerThread( + update_method=self._update_from_batch_or_episodes, + in_queue=self._learner_thread_in_queue, + out_queue=self._learner_thread_out_queue, + metrics_logger=self.metrics, + ) + self._learner_thread.start() + + @override(Learner) + def update_from_episodes( + self, + episodes: List[EpisodeType], + *, + timesteps: Dict[str, Any], + **kwargs, + ) -> ResultDict: + self.metrics.set_value( + (ALL_MODULES, NUM_ENV_STEPS_SAMPLED_LIFETIME), + timesteps[NUM_ENV_STEPS_SAMPLED_LIFETIME], + ) + + # TODO (sven): IMPALA does NOT call additional update anymore from its + # `training_step()` method. Instead, we'll do this here (to avoid the extra + # metrics.reduce() call -> we should only call this once per update round). + self.before_gradient_based_update(timesteps=timesteps) + + with self.metrics.log_time((ALL_MODULES, RAY_GET_EPISODES_TIMER)): + # Resolve batch/episodes being ray object refs (instead of + # actual batch/episodes objects). + # If this fails, it might be because some of the EnvRunners that collected + # `episodes` are down (ex. SPOT preemption or single EnvRunner crash). + # In this case, we should ignore those List[EpisodeType] references and not + # use these for the train batch. + try: + episodes = ray.get(episodes) + episodes_flat = tree.flatten(episodes) + except ray.exceptions.RayError: + # Try unreferencing one by one and collect those that are ok. + episodes_flat = [] + for e in episodes: + try: + episodes_flat.extend(ray.get(e)) + # Ignore exceptions and move on with other references. 
+ except Exception: + pass + + env_steps = sum(map(len, episodes_flat)) + + # Only send a batch to the learner pipeline if its size is > 0. + if env_steps > 0: + # Call the learner connector pipeline. + with self.metrics.log_time((ALL_MODULES, EPISODES_TO_BATCH_TIMER)): + batch = self._learner_connector( + rl_module=self.module, + batch={}, + episodes=episodes_flat, + shared_data={}, + ) + + # Queue the CPU batch to the GPU-loader thread. + if self.config.num_gpus_per_learner > 0: + self._gpu_loader_in_queue.put((batch, env_steps)) + self.metrics.log_value( + (ALL_MODULES, QUEUE_SIZE_GPU_LOADER_QUEUE), + self._gpu_loader_in_queue.qsize(), + ) + else: + ma_batch = MultiAgentBatch( + {mid: SampleBatch(b) for mid, b in batch.items()}, + env_steps=env_steps, + ) + # Add the batch directly to the circular buffer. + if isinstance(self._learner_thread_in_queue, CircularBuffer): + ts_dropped = self._learner_thread_in_queue.add(ma_batch) + self.metrics.log_value( + (ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED), + ts_dropped, + reduce="sum", + ) + else: + # Enqueue to Learner thread's in-queue. + _LearnerThread.enqueue( + self._learner_thread_in_queue, + ma_batch, + self.metrics, + ) + + # Return all queued result dicts thus far (after reducing over them). + results = {} + ts_trained = 0 + try: + while True: + results = self._learner_thread_out_queue.get(block=False) + ts_trained += results[ALL_MODULES][NUM_ENV_STEPS_TRAINED].peek() + except queue.Empty: + if ts_trained: + results[ALL_MODULES][NUM_ENV_STEPS_TRAINED].values = [ts_trained] + return results + + @OverrideToImplementCustomLogic_CallToSuperRecommended + def before_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None: + super().before_gradient_based_update(timesteps=timesteps) + + for module_id in self.module.keys(): + # Update entropy coefficient via our Scheduler. + new_entropy_coeff = self.entropy_coeff_schedulers_per_module[ + module_id + ].update(timestep=timesteps.get(NUM_ENV_STEPS_SAMPLED_LIFETIME, 0)) + self.metrics.log_value( + (module_id, LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY), + new_entropy_coeff, + window=1, + ) + + @override(Learner) + def remove_module(self, module_id: str): + super().remove_module(module_id) + self.entropy_coeff_schedulers_per_module.pop(module_id) + + +ImpalaLearner = IMPALALearner + + +class _GPULoaderThread(threading.Thread): + def __init__( + self, + *, + in_queue: queue.Queue, + out_queue: deque, + device: torch.device, + metrics_logger: MetricsLogger, + ): + super().__init__() + self.daemon = True + + self._in_queue = in_queue + self._out_queue = out_queue + self._ts_dropped = 0 + self._device = device + self.metrics = metrics_logger + + def run(self) -> None: + while True: + self._step() + + def _step(self) -> None: + # Only measure time, if we have a `metrics` instance. + with self.metrics.log_time((ALL_MODULES, GPU_LOADER_QUEUE_WAIT_TIMER)): + # Get a new batch from the data (inqueue). + batch_on_cpu, env_steps = self._in_queue.get() + + with self.metrics.log_time((ALL_MODULES, GPU_LOADER_LOAD_TO_GPU_TIMER)): + # Load the batch onto the GPU device. 
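+            # INFOS entries are kept on the CPU (they may hold arbitrary,
+            # non-tensor objects); all other columns are copied with
+            # `non_blocking=True`, which can overlap the host-to-device transfer
+            # with compute when the source tensors live in pinned memory.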
+ batch_on_gpu = tree.map_structure_with_path( + lambda path, t: ( + t + if isinstance(path, tuple) and Columns.INFOS in path + else t.to(self._device, non_blocking=True) + ), + batch_on_cpu, + ) + ma_batch_on_gpu = MultiAgentBatch( + policy_batches={mid: SampleBatch(b) for mid, b in batch_on_gpu.items()}, + env_steps=env_steps, + ) + + if isinstance(self._out_queue, CircularBuffer): + ts_dropped = self._out_queue.add(ma_batch_on_gpu) + self.metrics.log_value( + (ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED), + ts_dropped, + reduce="sum", + ) + else: + # Enqueue to Learner thread's in-queue. + _LearnerThread.enqueue(self._out_queue, ma_batch_on_gpu, self.metrics) + + +class _LearnerThread(threading.Thread): + def __init__( + self, + *, + update_method, + in_queue: deque, + out_queue: queue.Queue, + metrics_logger, + ): + super().__init__() + self.daemon = True + self.metrics: MetricsLogger = metrics_logger + self.stopped = False + + self._update_method = update_method + self._in_queue: Union[deque, CircularBuffer] = in_queue + self._out_queue: queue.Queue = out_queue + + def run(self) -> None: + while not self.stopped: + self.step() + + def step(self): + # Get a new batch from the GPU-data (deque.pop -> newest item first). + with self.metrics.log_time((ALL_MODULES, LEARNER_THREAD_IN_QUEUE_WAIT_TIMER)): + # Get a new batch from the GPU-data (learner queue OR circular buffer). + if isinstance(self._in_queue, CircularBuffer): + ma_batch_on_gpu = self._in_queue.sample() + else: + # Queue is empty: Sleep a tiny bit to avoid CPU-thrashing. + if not self._in_queue: + time.sleep(0.001) + return + # Consume from the left (oldest batches first). + # If we consumed from the right, we would run into the danger of + # learning from newer batches (left side) most times, BUT sometimes + # grabbing older batches (right area of deque). + ma_batch_on_gpu = self._in_queue.popleft() + + # Call the update method on the batch. + with self.metrics.log_time((ALL_MODULES, LEARNER_THREAD_UPDATE_TIMER)): + # TODO (sven): For multi-agent AND SGD iter > 1, we need to make sure + # this thread has the information about the min minibatches necessary + # (due to different agents taking different steps in the env, e.g. + # MA-CartPole). + results = self._update_method( + batch=ma_batch_on_gpu, + timesteps={ + NUM_ENV_STEPS_SAMPLED_LIFETIME: self.metrics.peek( + (ALL_MODULES, NUM_ENV_STEPS_SAMPLED_LIFETIME), default=0 + ) + }, + ) + # We have to deepcopy the results dict, b/c we must avoid having a returned + # Stats object sit in the queue and getting a new (possibly even tensor) + # value added to it, which would falsify this result. + self._out_queue.put(copy.deepcopy(results)) + + self.metrics.log_value( + (ALL_MODULES, QUEUE_SIZE_RESULTS_QUEUE), + self._out_queue.qsize(), + ) + + @staticmethod + def enqueue(learner_queue: deque, batch, metrics_logger): + # Right-append to learner queue (a deque). If full, drops the leftmost + # (oldest) item in the deque. + # Note that we consume from the left (oldest first), which is why the queue size + # should probably always be small'ish (<< 10), otherwise we run into the danger + # of training with very old samples. + # If we consumed from the right, we would run into the danger of learning + # from newer batches (left side) most times, BUT sometimes grabbing a + # really old batches (right area of deque). 
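+        # Illustrative example: with maxlen=4 and a full deque [b1, b2, b3, b4],
+        # enqueueing b5 pops b1 (logging its env steps as dropped) and leaves
+        # [b2, b3, b4, b5]; the learner thread then consumes b2 first via popleft().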
+ if len(learner_queue) == learner_queue.maxlen: + metrics_logger.log_value( + (ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED), + learner_queue.popleft().env_steps(), + reduce="sum", + ) + learner_queue.append(batch) + + # Log current queue size. + metrics_logger.log_value( + (ALL_MODULES, QUEUE_SIZE_LEARNER_THREAD_QUEUE), + len(learner_queue), + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6777cf10ba32dbaf0ab8e516836a4499f77fe41 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_catalog.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_catalog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eec9071cfba0cee225f41c7c37b18e1211d86340 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_catalog.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_rl_module.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_rl_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5c87e48ab636264c88d570ae21e82724b574472 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_rl_module.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_tf_model.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_tf_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96d7cf1568c45d1ba6e8ec63a40a51f25d80ace3 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/__pycache__/sac_tf_model.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..58703174742ae7948eee482591a3394e193f9269 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_learner.py @@ -0,0 +1,77 @@ +import numpy as np + +from typing import Dict + +from ray.rllib.algorithms.dqn.dqn_rainbow_learner import DQNRainbowLearner +from ray.rllib.core.learner.learner import Learner +from ray.rllib.utils.annotations import override +from ray.rllib.utils.lambda_defaultdict import LambdaDefaultDict +from ray.rllib.utils.typing import ModuleID, TensorType + +# Now, this is double defined: In `SACRLModule` and here. I would keep it here +# or push it into the `Learner` as these are recurring keys in RL. 
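+# These constants are used as per-module metrics keys by the SAC loss, so a reduced
+# results dict would (illustratively) expose them as e.g.
+# results["default_policy"]["qf_loss"].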
+LOGPS_KEY = "logps" +QF_LOSS_KEY = "qf_loss" +QF_MEAN_KEY = "qf_mean" +QF_MAX_KEY = "qf_max" +QF_MIN_KEY = "qf_min" +QF_PREDS = "qf_preds" +QF_TWIN_LOSS_KEY = "qf_twin_loss" +QF_TWIN_PREDS = "qf_twin_preds" +TD_ERROR_MEAN_KEY = "td_error_mean" +CRITIC_TARGET = "critic_target" +ACTION_DIST_INPUTS_NEXT = "action_dist_inputs_next" + + +class SACLearner(DQNRainbowLearner): + @override(Learner) + def build(self) -> None: + # Store the current alpha in log form. We need it during optimization + # in log form. + self.curr_log_alpha: Dict[ModuleID, TensorType] = LambdaDefaultDict( + lambda module_id: self._get_tensor_variable( + # Note, we want to train the temperature parameter. + [ + np.log( + self.config.get_config_for_module(module_id).initial_alpha + ).astype(np.float32) + ], + trainable=True, + ) + ) + + # We need to call the `super()`'s `build()` method here to have the variables + # for the alpha already defined. + super().build() + + def get_target_entropy(module_id): + """Returns the target entropy to use for the loss. + + Args: + module_id: Module ID for which the target entropy should be + returned. + + Returns: + Target entropy. + """ + target_entropy = self.config.get_config_for_module(module_id).target_entropy + if target_entropy is None or target_entropy == "auto": + target_entropy = -np.prod( + self._module_spec.module_specs[module_id].action_space.shape + ) + return target_entropy + + self.target_entropy: Dict[ModuleID, TensorType] = LambdaDefaultDict( + lambda module_id: self._get_tensor_variable(get_target_entropy(module_id)) + ) + + @override(Learner) + def remove_module(self, module_id: ModuleID) -> None: + """Removes the temperature and target entropy. + + Note, this means that we also need to remove the corresponding + temperature optimizer. + """ + super().remove_module(module_id) + self.curr_log_alpha.pop(module_id, None) + self.target_entropy.pop(module_id, None) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_rl_module.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_rl_module.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d1b783d326c3d0f9a398a0edd58d561b9053d2 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_rl_module.py @@ -0,0 +1,172 @@ +from abc import abstractmethod +from typing import Any, Dict, List, Tuple + +from ray.rllib.algorithms.sac.sac_learner import ( + ACTION_DIST_INPUTS_NEXT, + QF_PREDS, + QF_TWIN_PREDS, +) +from ray.rllib.core.learner.utils import make_target_network +from ray.rllib.core.models.base import Encoder, Model +from ray.rllib.core.models.specs.typing import SpecType +from ray.rllib.core.rl_module.apis import InferenceOnlyAPI, TargetNetworkAPI +from ray.rllib.core.rl_module.rl_module import RLModule +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.annotations import ( + override, + OverrideToImplementCustomLogic, +) +from ray.rllib.utils.typing import NetworkType +from ray.util.annotations import DeveloperAPI + + +@DeveloperAPI(stability="alpha") +class SACRLModule(RLModule, InferenceOnlyAPI, TargetNetworkAPI): + """`RLModule` for the Soft-Actor-Critic (SAC) algorithm. + + It consists of several architectures, each in turn composed of + two networks: an encoder and a head. + + The policy (actor) contains a state encoder (`pi_encoder`) and + a head (`pi_head`) that feeds into an action distribution (a + squashed Gaussian, i.e. outputs define the location and the log + scale parameters). 
+ + In addition, two (or four in case `twin_q=True`) Q networks are + defined, the second one (and fourth, if `twin_q=True`) of them the + Q target network(s). All of these in turn are - similar to the + policy network - composed of an encoder and a head network. Each of + the encoders forms a state-action encoding that feeds into the + corresponding value heads to result in an estimation of the soft + action-value of SAC. + + The following graphics show the forward passes through this module: + [obs] -> [pi_encoder] -> [pi_head] -> [action_dist_inputs] + [obs, action] -> [qf_encoder] -> [qf_head] -> [q-value] + [obs, action] -> [qf_target_encoder] -> [qf_target_head] + -> [q-target-value] + --- + If `twin_q=True`: + [obs, action] -> [qf_twin_encoder] -> [qf_twin_head] -> [q-twin-value] + [obs, action] -> [qf_target_twin_encoder] -> [qf_target_twin_head] + -> [q-target-twin-value] + """ + + @override(RLModule) + def setup(self): + if self.catalog is None and hasattr(self, "_catalog_ctor_error"): + raise self._catalog_ctor_error + + # If a twin Q architecture should be used. + self.twin_q = self.model_config["twin_q"] + + # Build the encoder for the policy. + self.pi_encoder = self.catalog.build_encoder(framework=self.framework) + + if not self.inference_only or self.framework != "torch": + # SAC needs a separate Q network encoder (besides the pi network). + # This is because the Q network also takes the action as input + # (concatenated with the observations). + self.qf_encoder = self.catalog.build_qf_encoder(framework=self.framework) + + # If necessary, build also a twin Q encoders. + if self.twin_q: + self.qf_twin_encoder = self.catalog.build_qf_encoder( + framework=self.framework + ) + + # Build heads. + self.pi = self.catalog.build_pi_head(framework=self.framework) + + if not self.inference_only or self.framework != "torch": + self.qf = self.catalog.build_qf_head(framework=self.framework) + # If necessary build also a twin Q heads. + if self.twin_q: + self.qf_twin = self.catalog.build_qf_head(framework=self.framework) + + @override(TargetNetworkAPI) + def make_target_networks(self): + self.target_qf_encoder = make_target_network(self.qf_encoder) + self.target_qf = make_target_network(self.qf) + if self.twin_q: + self.target_qf_twin_encoder = make_target_network(self.qf_twin_encoder) + self.target_qf_twin = make_target_network(self.qf_twin) + + @override(InferenceOnlyAPI) + def get_non_inference_attributes(self) -> List[str]: + ret = ["qf", "target_qf", "qf_encoder", "target_qf_encoder"] + if self.twin_q: + ret += [ + "qf_twin", + "target_qf_twin", + "qf_twin_encoder", + "target_qf_twin_encoder", + ] + return ret + + @override(TargetNetworkAPI) + def get_target_network_pairs(self) -> List[Tuple[NetworkType, NetworkType]]: + """Returns target Q and Q network(s) to update the target network(s).""" + return [ + (self.qf_encoder, self.target_qf_encoder), + (self.qf, self.target_qf), + ] + ( + # If we have twin networks we need to update them, too. + [ + (self.qf_twin_encoder, self.target_qf_twin_encoder), + (self.qf_twin, self.target_qf_twin), + ] + if self.twin_q + else [] + ) + + # TODO (simon): SAC does not support RNNs, yet. 
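+    # Hence, `get_initial_state` returns an empty dict; the commented-out block
+    # below sketches what an RNN-capable variant might return instead.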
+ @override(RLModule) + def get_initial_state(self) -> dict: + # if hasattr(self.pi_encoder, "get_initial_state"): + # return { + # ACTOR: self.pi_encoder.get_initial_state(), + # CRITIC: self.qf_encoder.get_initial_state(), + # CRITIC_TARGET: self.qf_target_encoder.get_initial_state(), + # } + # else: + # return {} + return {} + + @override(RLModule) + def input_specs_train(self) -> SpecType: + return [ + SampleBatch.OBS, + SampleBatch.ACTIONS, + SampleBatch.NEXT_OBS, + ] + + @override(RLModule) + def output_specs_train(self) -> SpecType: + return ( + [ + QF_PREDS, + SampleBatch.ACTION_DIST_INPUTS, + ACTION_DIST_INPUTS_NEXT, + ] + + [QF_TWIN_PREDS] + if self.twin_q + else [] + ) + + @abstractmethod + @OverrideToImplementCustomLogic + def _qf_forward_train_helper( + self, batch: Dict[str, Any], encoder: Encoder, head: Model + ) -> Dict[str, Any]: + """Executes the forward pass for Q networks. + + Args: + batch: Dict containing a concatenated tensor with observations + and actions under the key `SampleBatch.OBS`. + encoder: An `Encoder` model for the Q state-action encoder. + head: A `Model` for the Q head. + + Returns: + The estimated Q-value using the `encoder` and `head` networks. + """ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_tf_model.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_tf_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7302a25fcccf95a5ec262c0cf3574c62df328222 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_tf_model.py @@ -0,0 +1,321 @@ +import gymnasium as gym +from gymnasium.spaces import Box, Discrete +import numpy as np +import tree # pip install dm_tree +from typing import Dict, List, Optional + +from ray.rllib.models.catalog import ModelCatalog +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.spaces.simplex import Simplex +from ray.rllib.utils.typing import ModelConfigDict, TensorType, TensorStructType + +tf1, tf, tfv = try_import_tf() + + +class SACTFModel(TFModelV2): + """Extension of the standard TFModelV2 for SAC. + + To customize, do one of the following: + - sub-class SACTFModel and override one or more of its methods. + - Use SAC's `q_model_config` and `policy_model` keys to tweak the default model + behaviors (e.g. fcnet_hiddens, conv_filters, etc..). + - Use SAC's `q_model_config->custom_model` and `policy_model->custom_model` keys + to specify your own custom Q-model(s) and policy-models, which will be + created within this SACTFModel (see `build_policy_model` and + `build_q_model`. + + Note: It is not recommended to override the `forward` method for SAC. This + would lead to shared weights (between policy and Q-nets), which will then + not be optimized by either of the critic- or actor-optimizers! + + Data flow: + `obs` -> forward() (should stay a noop method!) 
-> `model_out` + `model_out` -> get_policy_output() -> pi(actions|obs) + `model_out`, `actions` -> get_q_values() -> Q(s, a) + `model_out`, `actions` -> get_twin_q_values() -> Q_twin(s, a) + """ + + def __init__( + self, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: Optional[int], + model_config: ModelConfigDict, + name: str, + policy_model_config: ModelConfigDict = None, + q_model_config: ModelConfigDict = None, + twin_q: bool = False, + initial_alpha: float = 1.0, + target_entropy: Optional[float] = None, + ): + """Initialize a SACTFModel instance. + + Args: + policy_model_config: The config dict for the + policy network. + q_model_config: The config dict for the + Q-network(s) (2 if twin_q=True). + twin_q: Build twin Q networks (Q-net and target) for more + stable Q-learning. + initial_alpha: The initial value for the to-be-optimized + alpha parameter (default: 1.0). + target_entropy (Optional[float]): A target entropy value for + the to-be-optimized alpha parameter. If None, will use the + defaults described in the papers for SAC (and discrete SAC). + + Note that the core layers for forward() are not defined here, this + only defines the layers for the output heads. Those layers for + forward() should be defined in subclasses of SACModel. + """ + super(SACTFModel, self).__init__( + obs_space, action_space, num_outputs, model_config, name + ) + if isinstance(action_space, Discrete): + self.action_dim = action_space.n + self.discrete = True + action_outs = q_outs = self.action_dim + elif isinstance(action_space, Box): + self.action_dim = np.prod(action_space.shape) + self.discrete = False + action_outs = 2 * self.action_dim + q_outs = 1 + else: + assert isinstance(action_space, Simplex) + self.action_dim = np.prod(action_space.shape) + self.discrete = False + action_outs = self.action_dim + q_outs = 1 + + self.action_model = self.build_policy_model( + self.obs_space, action_outs, policy_model_config, "policy_model" + ) + + self.q_net = self.build_q_model( + self.obs_space, self.action_space, q_outs, q_model_config, "q" + ) + if twin_q: + self.twin_q_net = self.build_q_model( + self.obs_space, self.action_space, q_outs, q_model_config, "twin_q" + ) + else: + self.twin_q_net = None + + self.log_alpha = tf.Variable( + np.log(initial_alpha), dtype=tf.float32, name="log_alpha" + ) + self.alpha = tf.exp(self.log_alpha) + + # Auto-calculate the target entropy. + if target_entropy is None or target_entropy == "auto": + # See hyperparams in [2] (README.md). + if self.discrete: + target_entropy = 0.98 * np.array( + -np.log(1.0 / action_space.n), dtype=np.float32 + ) + # See [1] (README.md). + else: + target_entropy = -np.prod(action_space.shape) + self.target_entropy = target_entropy + + @override(TFModelV2) + def forward( + self, + input_dict: Dict[str, TensorType], + state: List[TensorType], + seq_lens: TensorType, + ) -> (TensorType, List[TensorType]): + """The common (Q-net and policy-net) forward pass. + + NOTE: It is not(!) recommended to override this method as it would + introduce a shared pre-network, which would be updated by both + actor- and critic optimizers. + """ + return input_dict["obs"], state + + def build_policy_model(self, obs_space, num_outputs, policy_model_config, name): + """Builds the policy model used by this SAC. + + Override this method in a sub-class of SACTFModel to implement your + own policy net. 
Alternatively, simply set `custom_model` within the + top level SAC `policy_model` config key to make this default + implementation of `build_policy_model` use your custom policy network. + + Returns: + TFModelV2: The TFModelV2 policy sub-model. + """ + model = ModelCatalog.get_model_v2( + obs_space, + self.action_space, + num_outputs, + policy_model_config, + framework="tf", + name=name, + ) + return model + + def build_q_model(self, obs_space, action_space, num_outputs, q_model_config, name): + """Builds one of the (twin) Q-nets used by this SAC. + + Override this method in a sub-class of SACTFModel to implement your + own Q-nets. Alternatively, simply set `custom_model` within the + top level SAC `q_model_config` config key to make this default implementation + of `build_q_model` use your custom Q-nets. + + Returns: + TFModelV2: The TFModelV2 Q-net sub-model. + """ + self.concat_obs_and_actions = False + if self.discrete: + input_space = obs_space + else: + orig_space = getattr(obs_space, "original_space", obs_space) + if isinstance(orig_space, Box) and len(orig_space.shape) == 1: + input_space = Box( + float("-inf"), + float("inf"), + shape=(orig_space.shape[0] + action_space.shape[0],), + ) + self.concat_obs_and_actions = True + else: + input_space = gym.spaces.Tuple([orig_space, action_space]) + + model = ModelCatalog.get_model_v2( + input_space, + action_space, + num_outputs, + q_model_config, + framework="tf", + name=name, + ) + return model + + def get_q_values( + self, model_out: TensorType, actions: Optional[TensorType] = None + ) -> TensorType: + """Returns Q-values, given the output of self.__call__(). + + This implements Q(s, a) -> [single Q-value] for the continuous case and + Q(s) -> [Q-values for all actions] for the discrete case. + + Args: + model_out: Feature outputs from the model layers + (result of doing `self.__call__(obs)`). + actions (Optional[TensorType]): Continuous action batch to return + Q-values for. Shape: [BATCH_SIZE, action_dim]. If None + (discrete action case), return Q-values for all actions. + + Returns: + TensorType: Q-values tensor of shape [BATCH_SIZE, 1]. + """ + return self._get_q_value(model_out, actions, self.q_net) + + def get_twin_q_values( + self, model_out: TensorType, actions: Optional[TensorType] = None + ) -> TensorType: + """Same as get_q_values but using the twin Q net. + + This implements the twin Q(s, a). + + Args: + model_out: Feature outputs from the model layers + (result of doing `self.__call__(obs)`). + actions (Optional[Tensor]): Actions to return the Q-values for. + Shape: [BATCH_SIZE, action_dim]. If None (discrete action + case), return Q-values for all actions. + + Returns: + TensorType: Q-values tensor of shape [BATCH_SIZE, 1]. + """ + return self._get_q_value(model_out, actions, self.twin_q_net) + + def _get_q_value(self, model_out, actions, net): + # Model outs may come as original Tuple/Dict observations, concat them + # here if this is the case. + if isinstance(net.obs_space, Box): + if isinstance(model_out, (list, tuple)): + model_out = tf.concat(model_out, axis=-1) + elif isinstance(model_out, dict): + model_out = tf.concat(list(model_out.values()), axis=-1) + + # Continuous case -> concat actions to model_out. + if actions is not None: + if self.concat_obs_and_actions: + input_dict = {"obs": tf.concat([model_out, actions], axis=-1)} + else: + # TODO(junogng) : SampleBatch doesn't support list columns yet. + # Use ModelInputDict. 
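+                # In this branch the Q-net was built on a Tuple(obs, actions)
+                # input space (see `build_q_model`), so obs and actions are passed
+                # as a tuple rather than being concatenated.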
+ input_dict = {"obs": (model_out, actions)} + # Discrete case -> return q-vals for all actions. + else: + input_dict = {"obs": model_out} + # Switch on training mode (when getting Q-values, we are usually in + # training). + input_dict["is_training"] = True + + return net(input_dict, [], None) + + def get_action_model_outputs( + self, + model_out: TensorType, + state_in: List[TensorType] = None, + seq_lens: TensorType = None, + ) -> (TensorType, List[TensorType]): + """Returns distribution inputs and states given the output of + policy.model(). + + For continuous action spaces, these will be the mean/stddev + distribution inputs for the (SquashedGaussian) action distribution. + For discrete action spaces, these will be the logits for a categorical + distribution. + + Args: + model_out: Feature outputs from the model layers + (result of doing `model(obs)`). + state_in List(TensorType): State input for recurrent cells + seq_lens: Sequence lengths of input- and state + sequences + + Returns: + TensorType: Distribution inputs for sampling actions. + """ + + def concat_obs_if_necessary(obs: TensorStructType): + """Concat model outs if they are original tuple observations.""" + if isinstance(obs, (list, tuple)): + obs = tf.concat(obs, axis=-1) + elif isinstance(obs, dict): + obs = tf.concat( + [ + tf.expand_dims(val, 1) if len(val.shape) == 1 else val + for val in tree.flatten(obs.values()) + ], + axis=-1, + ) + return obs + + if state_in is None: + state_in = [] + + if isinstance(model_out, dict) and "obs" in model_out: + # Model outs may come as original Tuple observations + if isinstance(self.action_model.obs_space, Box): + model_out["obs"] = concat_obs_if_necessary(model_out["obs"]) + return self.action_model(model_out, state_in, seq_lens) + else: + if isinstance(self.action_model.obs_space, Box): + model_out = concat_obs_if_necessary(model_out) + return self.action_model({"obs": model_out}, state_in, seq_lens) + + def policy_variables(self): + """Return the list of variables for the policy net.""" + + return self.action_model.variables() + + def q_variables(self): + """Return the list of variables for Q / twin Q nets.""" + + return self.q_net.variables() + ( + self.twin_q_net.variables() if self.twin_q_net else [] + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_torch_model.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_torch_model.py new file mode 100644 index 0000000000000000000000000000000000000000..00219fd95b8af80fe76f47079ba1f3af79482fc2 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_torch_model.py @@ -0,0 +1,329 @@ +import gymnasium as gym +from gymnasium.spaces import Box, Discrete +import numpy as np +import tree # pip install dm_tree +from typing import Dict, List, Optional + +from ray.rllib.models.catalog import ModelCatalog +from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.spaces.simplex import Simplex +from ray.rllib.utils.typing import ModelConfigDict, TensorType, TensorStructType + +torch, nn = try_import_torch() + + +class SACTorchModel(TorchModelV2, nn.Module): + """Extension of the standard TorchModelV2 for SAC. + + To customize, do one of the following: + - sub-class SACTorchModel and override one or more of its methods. + - Use SAC's `q_model_config` and `policy_model` keys to tweak the default model + behaviors (e.g. 
fcnet_hiddens, conv_filters, etc..). + - Use SAC's `q_model_config->custom_model` and `policy_model->custom_model` keys + to specify your own custom Q-model(s) and policy-models, which will be + created within this SACTFModel (see `build_policy_model` and + `build_q_model`. + + Note: It is not recommended to override the `forward` method for SAC. This + would lead to shared weights (between policy and Q-nets), which will then + not be optimized by either of the critic- or actor-optimizers! + + Data flow: + `obs` -> forward() (should stay a noop method!) -> `model_out` + `model_out` -> get_policy_output() -> pi(actions|obs) + `model_out`, `actions` -> get_q_values() -> Q(s, a) + `model_out`, `actions` -> get_twin_q_values() -> Q_twin(s, a) + """ + + def __init__( + self, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: Optional[int], + model_config: ModelConfigDict, + name: str, + policy_model_config: ModelConfigDict = None, + q_model_config: ModelConfigDict = None, + twin_q: bool = False, + initial_alpha: float = 1.0, + target_entropy: Optional[float] = None, + ): + """Initializes a SACTorchModel instance. + 7 + Args: + policy_model_config: The config dict for the + policy network. + q_model_config: The config dict for the + Q-network(s) (2 if twin_q=True). + twin_q: Build twin Q networks (Q-net and target) for more + stable Q-learning. + initial_alpha: The initial value for the to-be-optimized + alpha parameter (default: 1.0). + target_entropy (Optional[float]): A target entropy value for + the to-be-optimized alpha parameter. If None, will use the + defaults described in the papers for SAC (and discrete SAC). + + Note that the core layers for forward() are not defined here, this + only defines the layers for the output heads. Those layers for + forward() should be defined in subclasses of SACModel. + """ + nn.Module.__init__(self) + super(SACTorchModel, self).__init__( + obs_space, action_space, num_outputs, model_config, name + ) + + if isinstance(action_space, Discrete): + self.action_dim = action_space.n + self.discrete = True + action_outs = q_outs = self.action_dim + elif isinstance(action_space, Box): + self.action_dim = np.prod(action_space.shape) + self.discrete = False + action_outs = 2 * self.action_dim + q_outs = 1 + else: + assert isinstance(action_space, Simplex) + self.action_dim = np.prod(action_space.shape) + self.discrete = False + action_outs = self.action_dim + q_outs = 1 + + # Build the policy network. + self.action_model = self.build_policy_model( + self.obs_space, action_outs, policy_model_config, "policy_model" + ) + + # Build the Q-network(s). + self.q_net = self.build_q_model( + self.obs_space, self.action_space, q_outs, q_model_config, "q" + ) + if twin_q: + self.twin_q_net = self.build_q_model( + self.obs_space, self.action_space, q_outs, q_model_config, "twin_q" + ) + else: + self.twin_q_net = None + + log_alpha = nn.Parameter( + torch.from_numpy(np.array([np.log(initial_alpha)])).float() + ) + self.register_parameter("log_alpha", log_alpha) + + # Auto-calculate the target entropy. + if target_entropy is None or target_entropy == "auto": + # See hyperparams in [2] (README.md). + if self.discrete: + target_entropy = 0.98 * np.array( + -np.log(1.0 / action_space.n), dtype=np.float32 + ) + # See [1] (README.md). 
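+            # E.g. (illustrative): a Box action space with shape (3,) yields a
+            # target entropy of -3.0.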
+ else: + target_entropy = -np.prod(action_space.shape) + + target_entropy = nn.Parameter( + torch.from_numpy(np.array([target_entropy])).float(), requires_grad=False + ) + self.register_parameter("target_entropy", target_entropy) + + @override(TorchModelV2) + def forward( + self, + input_dict: Dict[str, TensorType], + state: List[TensorType], + seq_lens: TensorType, + ) -> (TensorType, List[TensorType]): + """The common (Q-net and policy-net) forward pass. + + NOTE: It is not(!) recommended to override this method as it would + introduce a shared pre-network, which would be updated by both + actor- and critic optimizers. + """ + return input_dict["obs"], state + + def build_policy_model(self, obs_space, num_outputs, policy_model_config, name): + """Builds the policy model used by this SAC. + + Override this method in a sub-class of SACTFModel to implement your + own policy net. Alternatively, simply set `custom_model` within the + top level SAC `policy_model` config key to make this default + implementation of `build_policy_model` use your custom policy network. + + Returns: + TorchModelV2: The TorchModelV2 policy sub-model. + """ + model = ModelCatalog.get_model_v2( + obs_space, + self.action_space, + num_outputs, + policy_model_config, + framework="torch", + name=name, + ) + return model + + def build_q_model(self, obs_space, action_space, num_outputs, q_model_config, name): + """Builds one of the (twin) Q-nets used by this SAC. + + Override this method in a sub-class of SACTFModel to implement your + own Q-nets. Alternatively, simply set `custom_model` within the + top level SAC `q_model_config` config key to make this default implementation + of `build_q_model` use your custom Q-nets. + + Returns: + TorchModelV2: The TorchModelV2 Q-net sub-model. + """ + self.concat_obs_and_actions = False + if self.discrete: + input_space = obs_space + else: + orig_space = getattr(obs_space, "original_space", obs_space) + if isinstance(orig_space, Box) and len(orig_space.shape) == 1: + input_space = Box( + float("-inf"), + float("inf"), + shape=(orig_space.shape[0] + action_space.shape[0],), + ) + self.concat_obs_and_actions = True + else: + input_space = gym.spaces.Tuple([orig_space, action_space]) + + model = ModelCatalog.get_model_v2( + input_space, + action_space, + num_outputs, + q_model_config, + framework="torch", + name=name, + ) + return model + + def get_q_values( + self, model_out: TensorType, actions: Optional[TensorType] = None + ) -> TensorType: + """Returns Q-values, given the output of self.__call__(). + + This implements Q(s, a) -> [single Q-value] for the continuous case and + Q(s) -> [Q-values for all actions] for the discrete case. + + Args: + model_out: Feature outputs from the model layers + (result of doing `self.__call__(obs)`). + actions (Optional[TensorType]): Continuous action batch to return + Q-values for. Shape: [BATCH_SIZE, action_dim]. If None + (discrete action case), return Q-values for all actions. + + Returns: + TensorType: Q-values tensor of shape [BATCH_SIZE, 1]. + """ + return self._get_q_value(model_out, actions, self.q_net) + + def get_twin_q_values( + self, model_out: TensorType, actions: Optional[TensorType] = None + ) -> TensorType: + """Same as get_q_values but using the twin Q net. + + This implements the twin Q(s, a). + + Args: + model_out: Feature outputs from the model layers + (result of doing `self.__call__(obs)`). + actions (Optional[Tensor]): Actions to return the Q-values for. + Shape: [BATCH_SIZE, action_dim]. 
If None (discrete action + case), return Q-values for all actions. + + Returns: + TensorType: Q-values tensor of shape [BATCH_SIZE, 1]. + """ + return self._get_q_value(model_out, actions, self.twin_q_net) + + def _get_q_value(self, model_out, actions, net): + # Model outs may come as original Tuple observations, concat them + # here if this is the case. + if isinstance(net.obs_space, Box): + if isinstance(model_out, (list, tuple)): + model_out = torch.cat(model_out, dim=-1) + elif isinstance(model_out, dict): + model_out = torch.cat(list(model_out.values()), dim=-1) + + # Continuous case -> concat actions to model_out. + if actions is not None: + if self.concat_obs_and_actions: + input_dict = {"obs": torch.cat([model_out, actions], dim=-1)} + else: + # TODO(junogng) : SampleBatch doesn't support list columns yet. + # Use ModelInputDict. + input_dict = {"obs": (model_out, actions)} + # Discrete case -> return q-vals for all actions. + else: + input_dict = {"obs": model_out} + # Switch on training mode (when getting Q-values, we are usually in + # training). + input_dict["is_training"] = True + + return net(input_dict, [], None) + + def get_action_model_outputs( + self, + model_out: TensorType, + state_in: List[TensorType] = None, + seq_lens: TensorType = None, + ) -> (TensorType, List[TensorType]): + """Returns distribution inputs and states given the output of + policy.model(). + + For continuous action spaces, these will be the mean/stddev + distribution inputs for the (SquashedGaussian) action distribution. + For discrete action spaces, these will be the logits for a categorical + distribution. + + Args: + model_out: Feature outputs from the model layers + (result of doing `model(obs)`). + state_in List(TensorType): State input for recurrent cells + seq_lens: Sequence lengths of input- and state + sequences + + Returns: + TensorType: Distribution inputs for sampling actions. 
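+
+        For example (illustrative), a Box action space with shape (3,) yields a
+        distribution-input tensor with 2 * 3 = 6 columns, parameterizing the mean
+        and (log-)std of each action dimension of the squashed Gaussian.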
+ """ + + def concat_obs_if_necessary(obs: TensorStructType): + """Concat model outs if they come as original tuple observations.""" + if isinstance(obs, (list, tuple)): + obs = torch.cat(obs, dim=-1) + elif isinstance(obs, dict): + obs = torch.cat( + [ + torch.unsqueeze(val, 1) if len(val.shape) == 1 else val + for val in tree.flatten(obs.values()) + ], + dim=-1, + ) + return obs + + if state_in is None: + state_in = [] + + if isinstance(model_out, dict) and "obs" in model_out: + # Model outs may come as original Tuple observations + if isinstance(self.action_model.obs_space, Box): + model_out["obs"] = concat_obs_if_necessary(model_out["obs"]) + return self.action_model(model_out, state_in, seq_lens) + else: + if isinstance(self.action_model.obs_space, Box): + model_out = concat_obs_if_necessary(model_out) + return self.action_model({"obs": model_out}, state_in, seq_lens) + + def policy_variables(self): + """Return the list of variables for the policy net.""" + + return self.action_model.variables() + + def q_variables(self): + """Return the list of variables for Q / twin Q nets.""" + + return self.q_net.variables() + ( + self.twin_q_net.variables() if self.twin_q_net else [] + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_torch_policy.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_torch_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..eebcc18d3a2235d4dc40cc0328784e61ba47cbda --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/sac_torch_policy.py @@ -0,0 +1,517 @@ +""" +PyTorch policy class used for SAC. +""" + +import gymnasium as gym +from gymnasium.spaces import Box, Discrete +import logging +import tree # pip install dm_tree +from typing import Dict, List, Optional, Tuple, Type, Union + +import ray +import ray.experimental.tf_utils +from ray.rllib.algorithms.sac.sac_tf_policy import ( + build_sac_model, + postprocess_trajectory, + validate_spaces, +) +from ray.rllib.algorithms.dqn.dqn_tf_policy import PRIO_WEIGHTS +from ray.rllib.models.catalog import ModelCatalog +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.torch.torch_action_dist import ( + TorchCategorical, + TorchDistributionWrapper, + TorchDirichlet, + TorchSquashedGaussian, + TorchDiagGaussian, + TorchBeta, +) +from ray.rllib.policy.policy import Policy +from ray.rllib.policy.policy_template import build_policy_class +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.spaces.simplex import Simplex +from ray.rllib.policy.torch_mixins import TargetNetworkMixin +from ray.rllib.utils.torch_utils import ( + apply_grad_clipping, + concat_multi_gpu_td_errors, + huber_loss, +) +from ray.rllib.utils.typing import ( + LocalOptimizer, + ModelInputDict, + TensorType, + AlgorithmConfigDict, +) + +torch, nn = try_import_torch() +F = nn.functional + +logger = logging.getLogger(__name__) + + +def _get_dist_class( + policy: Policy, config: AlgorithmConfigDict, action_space: gym.spaces.Space +) -> Type[TorchDistributionWrapper]: + """Helper function to return a dist class based on config and action space. + + Args: + policy: The policy for which to return the action + dist class. + config: The Algorithm's config dict. + action_space (gym.spaces.Space): The action space used. + + Returns: + Type[TFActionDistribution]: A TF distribution class. 
+ """ + if hasattr(policy, "dist_class") and policy.dist_class is not None: + return policy.dist_class + elif config["model"].get("custom_action_dist"): + action_dist_class, _ = ModelCatalog.get_action_dist( + action_space, config["model"], framework="torch" + ) + return action_dist_class + elif isinstance(action_space, Discrete): + return TorchCategorical + elif isinstance(action_space, Simplex): + return TorchDirichlet + else: + assert isinstance(action_space, Box) + if config["normalize_actions"]: + return ( + TorchSquashedGaussian + if not config["_use_beta_distribution"] + else TorchBeta + ) + else: + return TorchDiagGaussian + + +def build_sac_model_and_action_dist( + policy: Policy, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + config: AlgorithmConfigDict, +) -> Tuple[ModelV2, Type[TorchDistributionWrapper]]: + """Constructs the necessary ModelV2 and action dist class for the Policy. + + Args: + policy: The TFPolicy that will use the models. + obs_space (gym.spaces.Space): The observation space. + action_space (gym.spaces.Space): The action space. + config: The SACConfig object. + + Returns: + ModelV2: The ModelV2 to be used by the Policy. Note: An additional + target model will be created in this function and assigned to + `policy.target_model`. + """ + model = build_sac_model(policy, obs_space, action_space, config) + action_dist_class = _get_dist_class(policy, config, action_space) + return model, action_dist_class + + +def action_distribution_fn( + policy: Policy, + model: ModelV2, + input_dict: ModelInputDict, + *, + state_batches: Optional[List[TensorType]] = None, + seq_lens: Optional[TensorType] = None, + prev_action_batch: Optional[TensorType] = None, + prev_reward_batch=None, + explore: Optional[bool] = None, + timestep: Optional[int] = None, + is_training: Optional[bool] = None +) -> Tuple[TensorType, Type[TorchDistributionWrapper], List[TensorType]]: + """The action distribution function to be used the algorithm. + + An action distribution function is used to customize the choice of action + distribution class and the resulting action distribution inputs (to + parameterize the distribution object). + After parameterizing the distribution, a `sample()` call + will be made on it to generate actions. + + Args: + policy: The Policy being queried for actions and calling this + function. + model (TorchModelV2): The SAC specific model to use to generate the + distribution inputs (see sac_tf|torch_model.py). Must support the + `get_action_model_outputs` method. + input_dict: The input-dict to be used for the model + call. + state_batches (Optional[List[TensorType]]): The list of internal state + tensor batches. + seq_lens (Optional[TensorType]): The tensor of sequence lengths used + in RNNs. + prev_action_batch (Optional[TensorType]): Optional batch of prev + actions used by the model. + prev_reward_batch (Optional[TensorType]): Optional batch of prev + rewards used by the model. + explore (Optional[bool]): Whether to activate exploration or not. If + None, use value of `config.explore`. + timestep (Optional[int]): An optional timestep. + is_training (Optional[bool]): An optional is-training flag. + + Returns: + Tuple[TensorType, Type[TorchDistributionWrapper], List[TensorType]]: + The dist inputs, dist class, and a list of internal state outputs + (in the RNN case). + """ + # Get base-model output (w/o the SAC specific parts of the network). 
+ model_out, _ = model(input_dict, [], None) + # Use the base output to get the policy outputs from the SAC model's + # policy components. + action_dist_inputs, _ = model.get_action_model_outputs(model_out) + # Get a distribution class to be used with the just calculated dist-inputs. + action_dist_class = _get_dist_class(policy, policy.config, policy.action_space) + + return action_dist_inputs, action_dist_class, [] + + +def actor_critic_loss( + policy: Policy, + model: ModelV2, + dist_class: Type[TorchDistributionWrapper], + train_batch: SampleBatch, +) -> Union[TensorType, List[TensorType]]: + """Constructs the loss for the Soft Actor Critic. + + Args: + policy: The Policy to calculate the loss for. + model (ModelV2): The Model to calculate the loss for. + dist_class (Type[TorchDistributionWrapper]: The action distr. class. + train_batch: The training data. + + Returns: + Union[TensorType, List[TensorType]]: A single loss tensor or a list + of loss tensors. + """ + # Look up the target model (tower) using the model tower. + target_model = policy.target_models[model] + + # Should be True only for debugging purposes (e.g. test cases)! + deterministic = policy.config["_deterministic_loss"] + + model_out_t, _ = model( + SampleBatch(obs=train_batch[SampleBatch.CUR_OBS], _is_training=True), [], None + ) + + model_out_tp1, _ = model( + SampleBatch(obs=train_batch[SampleBatch.NEXT_OBS], _is_training=True), [], None + ) + + target_model_out_tp1, _ = target_model( + SampleBatch(obs=train_batch[SampleBatch.NEXT_OBS], _is_training=True), [], None + ) + + alpha = torch.exp(model.log_alpha) + + # Discrete case. + if model.discrete: + # Get all action probs directly from pi and form their logp. + action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t) + log_pis_t = F.log_softmax(action_dist_inputs_t, dim=-1) + policy_t = torch.exp(log_pis_t) + action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1) + log_pis_tp1 = F.log_softmax(action_dist_inputs_tp1, -1) + policy_tp1 = torch.exp(log_pis_tp1) + # Q-values. + q_t, _ = model.get_q_values(model_out_t) + # Target Q-values. + q_tp1, _ = target_model.get_q_values(target_model_out_tp1) + if policy.config["twin_q"]: + twin_q_t, _ = model.get_twin_q_values(model_out_t) + twin_q_tp1, _ = target_model.get_twin_q_values(target_model_out_tp1) + q_tp1 = torch.min(q_tp1, twin_q_tp1) + q_tp1 -= alpha * log_pis_tp1 + + # Actually selected Q-values (from the actions batch). + one_hot = F.one_hot( + train_batch[SampleBatch.ACTIONS].long(), num_classes=q_t.size()[-1] + ) + q_t_selected = torch.sum(q_t * one_hot, dim=-1) + if policy.config["twin_q"]: + twin_q_t_selected = torch.sum(twin_q_t * one_hot, dim=-1) + # Discrete case: "Best" means weighted by the policy (prob) outputs. + q_tp1_best = torch.sum(torch.mul(policy_tp1, q_tp1), dim=-1) + q_tp1_best_masked = ( + 1.0 - train_batch[SampleBatch.TERMINATEDS].float() + ) * q_tp1_best + # Continuous actions case. + else: + # Sample single actions from distribution. 
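+        # `policy_t` (sampled for the current obs) feeds the actor- and alpha-losses
+        # further below, while `policy_tp1` (sampled for the next obs) is only used
+        # to evaluate the target Q-net(s) for the Bellman backup.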
+ action_dist_class = _get_dist_class(policy, policy.config, policy.action_space) + action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t) + action_dist_t = action_dist_class(action_dist_inputs_t, model) + policy_t = ( + action_dist_t.sample() + if not deterministic + else action_dist_t.deterministic_sample() + ) + log_pis_t = torch.unsqueeze(action_dist_t.logp(policy_t), -1) + action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1) + action_dist_tp1 = action_dist_class(action_dist_inputs_tp1, model) + policy_tp1 = ( + action_dist_tp1.sample() + if not deterministic + else action_dist_tp1.deterministic_sample() + ) + log_pis_tp1 = torch.unsqueeze(action_dist_tp1.logp(policy_tp1), -1) + + # Q-values for the actually selected actions. + q_t, _ = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS]) + if policy.config["twin_q"]: + twin_q_t, _ = model.get_twin_q_values( + model_out_t, train_batch[SampleBatch.ACTIONS] + ) + + # Q-values for current policy in given current state. + q_t_det_policy, _ = model.get_q_values(model_out_t, policy_t) + if policy.config["twin_q"]: + twin_q_t_det_policy, _ = model.get_twin_q_values(model_out_t, policy_t) + q_t_det_policy = torch.min(q_t_det_policy, twin_q_t_det_policy) + + # Target q network evaluation. + q_tp1, _ = target_model.get_q_values(target_model_out_tp1, policy_tp1) + if policy.config["twin_q"]: + twin_q_tp1, _ = target_model.get_twin_q_values( + target_model_out_tp1, policy_tp1 + ) + # Take min over both twin-NNs. + q_tp1 = torch.min(q_tp1, twin_q_tp1) + + q_t_selected = torch.squeeze(q_t, dim=-1) + if policy.config["twin_q"]: + twin_q_t_selected = torch.squeeze(twin_q_t, dim=-1) + q_tp1 -= alpha * log_pis_tp1 + + q_tp1_best = torch.squeeze(input=q_tp1, dim=-1) + q_tp1_best_masked = ( + 1.0 - train_batch[SampleBatch.TERMINATEDS].float() + ) * q_tp1_best + + # compute RHS of bellman equation + q_t_selected_target = ( + train_batch[SampleBatch.REWARDS] + + (policy.config["gamma"] ** policy.config["n_step"]) * q_tp1_best_masked + ).detach() + + # Compute the TD-error (potentially clipped). + base_td_error = torch.abs(q_t_selected - q_t_selected_target) + if policy.config["twin_q"]: + twin_td_error = torch.abs(twin_q_t_selected - q_t_selected_target) + td_error = 0.5 * (base_td_error + twin_td_error) + else: + td_error = base_td_error + + critic_loss = [torch.mean(train_batch[PRIO_WEIGHTS] * huber_loss(base_td_error))] + if policy.config["twin_q"]: + critic_loss.append( + torch.mean(train_batch[PRIO_WEIGHTS] * huber_loss(twin_td_error)) + ) + + # Alpha- and actor losses. + # Note: In the papers, alpha is used directly, here we take the log. + # Discrete case: Multiply the action probs as weights with the original + # loss terms (no expectations needed). + if model.discrete: + weighted_log_alpha_loss = policy_t.detach() * ( + -model.log_alpha * (log_pis_t + model.target_entropy).detach() + ) + # Sum up weighted terms and mean over all batch items. + alpha_loss = torch.mean(torch.sum(weighted_log_alpha_loss, dim=-1)) + # Actor loss. + actor_loss = torch.mean( + torch.sum( + torch.mul( + # NOTE: No stop_grad around policy output here + # (compare with q_t_det_policy for continuous case). + policy_t, + alpha.detach() * log_pis_t - q_t.detach(), + ), + dim=-1, + ) + ) + else: + alpha_loss = -torch.mean( + model.log_alpha * (log_pis_t + model.target_entropy).detach() + ) + # Note: Do not detach q_t_det_policy here b/c is depends partly + # on the policy vars (policy sample pushed through Q-net). 
+ # However, we must make sure `actor_loss` is not used to update + # the Q-net(s)' variables. + actor_loss = torch.mean(alpha.detach() * log_pis_t - q_t_det_policy) + + # Store values for stats function in model (tower), such that for + # multi-GPU, we do not override them during the parallel loss phase. + model.tower_stats["q_t"] = q_t + model.tower_stats["policy_t"] = policy_t + model.tower_stats["log_pis_t"] = log_pis_t + model.tower_stats["actor_loss"] = actor_loss + model.tower_stats["critic_loss"] = critic_loss + model.tower_stats["alpha_loss"] = alpha_loss + + # TD-error tensor in final stats + # will be concatenated and retrieved for each individual batch item. + model.tower_stats["td_error"] = td_error + + # Return all loss terms corresponding to our optimizers. + return tuple([actor_loss] + critic_loss + [alpha_loss]) + + +def stats(policy: Policy, train_batch: SampleBatch) -> Dict[str, TensorType]: + """Stats function for SAC. Returns a dict with important loss stats. + + Args: + policy: The Policy to generate stats for. + train_batch: The SampleBatch (already) used for training. + + Returns: + Dict[str, TensorType]: The stats dict. + """ + q_t = torch.stack(policy.get_tower_stats("q_t")) + + return { + "actor_loss": torch.mean(torch.stack(policy.get_tower_stats("actor_loss"))), + "critic_loss": torch.mean( + torch.stack(tree.flatten(policy.get_tower_stats("critic_loss"))) + ), + "alpha_loss": torch.mean(torch.stack(policy.get_tower_stats("alpha_loss"))), + "alpha_value": torch.exp(policy.model.log_alpha), + "log_alpha_value": policy.model.log_alpha, + "target_entropy": policy.model.target_entropy, + "policy_t": torch.mean(torch.stack(policy.get_tower_stats("policy_t"))), + "mean_q": torch.mean(q_t), + "max_q": torch.max(q_t), + "min_q": torch.min(q_t), + } + + +def optimizer_fn(policy: Policy, config: AlgorithmConfigDict) -> Tuple[LocalOptimizer]: + """Creates all necessary optimizers for SAC learning. + + The 3 or 4 (twin_q=True) optimizers returned here correspond to the + number of loss terms returned by the loss function. + + Args: + policy: The policy object to be trained. + config: The Algorithm's config dict. + + Returns: + Tuple[LocalOptimizer]: The local optimizers to use for policy training. + """ + policy.actor_optim = torch.optim.Adam( + params=policy.model.policy_variables(), + lr=config["optimization"]["actor_learning_rate"], + eps=1e-7, # to match tf.keras.optimizers.Adam's epsilon default + ) + + critic_split = len(policy.model.q_variables()) + if config["twin_q"]: + critic_split //= 2 + + policy.critic_optims = [ + torch.optim.Adam( + params=policy.model.q_variables()[:critic_split], + lr=config["optimization"]["critic_learning_rate"], + eps=1e-7, # to match tf.keras.optimizers.Adam's epsilon default + ) + ] + if config["twin_q"]: + policy.critic_optims.append( + torch.optim.Adam( + params=policy.model.q_variables()[critic_split:], + lr=config["optimization"]["critic_learning_rate"], + eps=1e-7, # to match tf.keras.optimizers.Adam's eps default + ) + ) + policy.alpha_optim = torch.optim.Adam( + params=[policy.model.log_alpha], + lr=config["optimization"]["entropy_learning_rate"], + eps=1e-7, # to match tf.keras.optimizers.Adam's epsilon default + ) + + return tuple([policy.actor_optim] + policy.critic_optims + [policy.alpha_optim]) + + +# TODO: Unify with DDPG's ComputeTDErrorMixin when SAC policy subclasses PolicyV2 +class ComputeTDErrorMixin: + """Mixin class calculating TD-error (part of critic loss) per batch item. 
+
+    - Adds `policy.compute_td_error()` method for TD-error calculation from a
+      batch of observations/actions/rewards/etc.
+    """
+
+    def __init__(self):
+        def compute_td_error(
+            obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights
+        ):
+            input_dict = self._lazy_tensor_dict(
+                {
+                    SampleBatch.CUR_OBS: obs_t,
+                    SampleBatch.ACTIONS: act_t,
+                    SampleBatch.REWARDS: rew_t,
+                    SampleBatch.NEXT_OBS: obs_tp1,
+                    SampleBatch.TERMINATEDS: terminateds_mask,
+                    PRIO_WEIGHTS: importance_weights,
+                }
+            )
+            # Do forward pass on loss to update td errors attribute
+            # (one TD-error value per item in batch to update PR weights).
+            actor_critic_loss(self, self.model, None, input_dict)
+
+            # The TD-error is written into `self.model.tower_stats` by the
+            # `actor_critic_loss` call above. Return its updated value here.
+            return self.model.tower_stats["td_error"]
+
+        # Assign the method to policy (self) for later usage.
+        self.compute_td_error = compute_td_error
+
+
+def setup_late_mixins(
+    policy: Policy,
+    obs_space: gym.spaces.Space,
+    action_space: gym.spaces.Space,
+    config: AlgorithmConfigDict,
+) -> None:
+    """Call mixin classes' constructors after Policy initialization.
+
+    - Moves the target model(s) to the GPU, if necessary.
+    - Adds the `compute_td_error` method to the given policy.
+      Calling `compute_td_error` with batch data will re-calculate the loss
+      on that batch AND return the per-batch-item TD-error for prioritized
+      replay buffer record weight updating (in case a prioritized replay buffer
+      is used).
+    - Also adds the `update_target` method to the given policy.
+      Calling `update_target` updates all target Q-networks' weights from their
+      respective "main" Q-networks, based on tau (smooth, partial updating).
+
+    Args:
+        policy: The Policy object.
+        obs_space (gym.spaces.Space): The Policy's observation space.
+        action_space (gym.spaces.Space): The Policy's action space.
+        config: The Policy's config.
+    """
+    ComputeTDErrorMixin.__init__(policy)
+    TargetNetworkMixin.__init__(policy)
+
+
+# Build a child class of `TorchPolicy`, given the custom functions defined
+# above.
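+# `build_policy_class` stitches the functions defined in this module (loss,
+# stats, optimizers, model/action-dist factory, mixins) into a single policy
+# class. It is usually not instantiated by hand but created for the torch
+# workers of an (old-API-stack) SAC algorithm, e.g. one configured via
+# `SACConfig().framework("torch")`.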
+SACTorchPolicy = build_policy_class( + name="SACTorchPolicy", + framework="torch", + loss_fn=actor_critic_loss, + get_default_config=lambda: ray.rllib.algorithms.sac.sac.SACConfig(), + stats_fn=stats, + postprocess_fn=postprocess_trajectory, + extra_grad_process_fn=apply_grad_clipping, + optimizer_fn=optimizer_fn, + validate_spaces=validate_spaces, + before_loss_init=setup_late_mixins, + make_model_and_action_dist=build_sac_model_and_action_dist, + extra_learn_fetches_fn=concat_multi_gpu_td_errors, + mixins=[TargetNetworkMixin, ComputeTDErrorMixin], + action_distribution_fn=action_distribution_fn, +) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/torch/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/torch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30c9c15218f2e807025dac92d1dcee68d27347a7 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/torch/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/torch/sac_torch_learner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/torch/sac_torch_learner.py new file mode 100644 index 0000000000000000000000000000000000000000..4cffb877bdc75993dd3ad11aba3994fc14ee6312 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/sac/torch/sac_torch_learner.py @@ -0,0 +1,259 @@ +from typing import Any, Dict + +from ray.rllib.algorithms.algorithm_config import AlgorithmConfig +from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_learner import ( + DQNRainbowTorchLearner, +) +from ray.rllib.algorithms.sac.sac import SACConfig +from ray.rllib.algorithms.sac.sac_learner import ( + LOGPS_KEY, + QF_LOSS_KEY, + QF_MEAN_KEY, + QF_MAX_KEY, + QF_MIN_KEY, + QF_PREDS, + QF_TWIN_LOSS_KEY, + QF_TWIN_PREDS, + TD_ERROR_MEAN_KEY, + SACLearner, +) +from ray.rllib.core.columns import Columns +from ray.rllib.core.learner.learner import ( + POLICY_LOSS_KEY, +) +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.metrics import ALL_MODULES, TD_ERROR_KEY +from ray.rllib.utils.typing import ModuleID, ParamDict, TensorType + + +torch, nn = try_import_torch() + + +class SACTorchLearner(DQNRainbowTorchLearner, SACLearner): + """Implements `torch`-specific SAC loss logic on top of `SACLearner` + + This ' Learner' class implements the loss in its + `self.compute_loss_for_module()` method. In addition, it updates + the target networks of the RLModule(s). + """ + + # TODO (simon): Set different learning rates for optimizers. + @override(DQNRainbowTorchLearner) + def configure_optimizers_for_module( + self, module_id: ModuleID, config: AlgorithmConfig = None + ) -> None: + # Receive the module. + module = self._module[module_id] + + # Define the optimizer for the critic. + # TODO (sven): Maybe we change here naming to `qf` for unification. + params_critic = self.get_parameters(module.qf_encoder) + self.get_parameters( + module.qf + ) + optim_critic = torch.optim.Adam(params_critic, eps=1e-7) + + self.register_optimizer( + module_id=module_id, + optimizer_name="qf", + optimizer=optim_critic, + params=params_critic, + lr_or_lr_schedule=config.critic_lr, + ) + # If necessary register also an optimizer for a twin Q network. 
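+        # (Twin-Q setup: the second critic gets its own optimizer below, but
+        # deliberately shares the same learning rate / schedule
+        # `config.critic_lr` with the main critic.)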
+ if config.twin_q: + params_twin_critic = self.get_parameters( + module.qf_twin_encoder + ) + self.get_parameters(module.qf_twin) + optim_twin_critic = torch.optim.Adam(params_twin_critic, eps=1e-7) + + self.register_optimizer( + module_id=module_id, + optimizer_name="qf_twin", + optimizer=optim_twin_critic, + params=params_twin_critic, + lr_or_lr_schedule=config.critic_lr, + ) + + # Define the optimizer for the actor. + params_actor = self.get_parameters(module.pi_encoder) + self.get_parameters( + module.pi + ) + optim_actor = torch.optim.Adam(params_actor, eps=1e-7) + + self.register_optimizer( + module_id=module_id, + optimizer_name="policy", + optimizer=optim_actor, + params=params_actor, + lr_or_lr_schedule=config.actor_lr, + ) + + # Define the optimizer for the temperature. + temperature = self.curr_log_alpha[module_id] + optim_temperature = torch.optim.Adam([temperature], eps=1e-7) + self.register_optimizer( + module_id=module_id, + optimizer_name="alpha", + optimizer=optim_temperature, + params=[temperature], + lr_or_lr_schedule=config.alpha_lr, + ) + + @override(DQNRainbowTorchLearner) + def compute_loss_for_module( + self, + *, + module_id: ModuleID, + config: SACConfig, + batch: Dict[str, Any], + fwd_out: Dict[str, TensorType] + ) -> TensorType: + # Receive the current alpha hyperparameter. + alpha = torch.exp(self.curr_log_alpha[module_id]) + + # Get Q-values for the actually selected actions during rollout. + # In the critic loss we use these as predictions. + q_selected = fwd_out[QF_PREDS] + if config.twin_q: + q_twin_selected = fwd_out[QF_TWIN_PREDS] + + # Compute value function for next state (see eq. (3) in Haarnoja et al. (2018)). + # Note, we use here the sampled actions in the log probabilities. + q_target_next = ( + fwd_out["q_target_next"] - alpha.detach() * fwd_out["logp_next_resampled"] + ) + # Now mask all Q-values with terminated next states in the targets. + q_next_masked = (1.0 - batch[Columns.TERMINATEDS].float()) * q_target_next + + # Compute the right hand side of the Bellman equation. + # Detach this node from the computation graph as we do not want to + # backpropagate through the target network when optimizing the Q loss. + q_selected_target = ( + batch[Columns.REWARDS] + (config.gamma ** batch["n_step"]) * q_next_masked + ).detach() + + # Calculate the TD-error. Note, this is needed for the priority weights in + # the replay buffer. + td_error = torch.abs(q_selected - q_selected_target) + # If a twin Q network should be used, add the TD error of the twin Q network. + if config.twin_q: + td_error += torch.abs(q_twin_selected - q_selected_target) + # Rescale the TD error. + td_error *= 0.5 + + # MSBE loss for the critic(s) (i.e. Q, see eqs. (7-8) Haarnoja et al. (2018)). + # Note, this needs a sample from the current policy given the next state. + # Note further, we use here the Huber loss instead of the mean squared error + # as it improves training performance. + critic_loss = torch.mean( + batch["weights"] + * torch.nn.HuberLoss(reduction="none", delta=1.0)( + q_selected, q_selected_target + ) + ) + # If a twin Q network should be used, add the critic loss of the twin Q network. + if config.twin_q: + critic_twin_loss = torch.mean( + batch["weights"] + * torch.nn.HuberLoss(reduction="none", delta=1.0)( + q_twin_selected, q_selected_target + ) + ) + + # For the actor (policy) loss we need sampled actions from the current policy + # evaluated at the current observations. 
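+        # As a reference, the (minimized) actor objective is
+        #     J_pi = E_{s~D, a~pi(.|s)}[ alpha * log pi(a|s) - Q(s, a) ],
+        # which is exactly the mean computed below.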
+        # Note that the `q_curr` tensor below has the q-net's gradients ignored, while
+        # having the policy's gradients registered. The policy net was used to rsample
+        # actions used to compute `q_curr` (by passing these actions through the q-net).
+        # Hence, we can't do `fwd_out[q_curr].detach()`!
+        # Note further, we minimize here, while the original equation in Haarnoja et
+        # al. (2018) considers maximization.
+        # TODO (simon): Rename `resampled` to `current`.
+        actor_loss = torch.mean(
+            alpha.detach() * fwd_out["logp_resampled"] - fwd_out["q_curr"]
+        )
+
+        # Optimize also the hyperparameter alpha by using the current policy
+        # evaluated at the current state (sampled values).
+        # TODO (simon): Check why log(alpha) is used; it is probably just better to
+        # optimize a monotonic function of it. The original equation uses alpha.
+        alpha_loss = -torch.mean(
+            self.curr_log_alpha[module_id]
+            * (fwd_out["logp_resampled"].detach() + self.target_entropy[module_id])
+        )
+
+        total_loss = actor_loss + critic_loss + alpha_loss
+        # If twin Q networks should be used, add the critic loss of the twin Q network.
+        if config.twin_q:
+            # TODO (simon): Check, if we need to multiply the critic_loss then with 0.5.
+            total_loss += critic_twin_loss
+
+        # Log the TD-error with reduce=None, such that - in case we have n parallel
+        # Learners - we will re-concatenate the produced TD-error tensors to yield
+        # a 1:1 representation of the original batch.
+        self.metrics.log_value(
+            key=(module_id, TD_ERROR_KEY),
+            value=td_error,
+            reduce=None,
+            clear_on_reduce=True,
+        )
+        # Log other important loss stats (reduce=mean (default), but with window=1
+        # in order to keep them history free).
+        self.metrics.log_dict(
+            {
+                POLICY_LOSS_KEY: actor_loss,
+                QF_LOSS_KEY: critic_loss,
+                "alpha_loss": alpha_loss,
+                "alpha_value": alpha,
+                "log_alpha_value": torch.log(alpha),
+                "target_entropy": self.target_entropy[module_id],
+                LOGPS_KEY: torch.mean(fwd_out["logp_resampled"]),
+                QF_MEAN_KEY: torch.mean(fwd_out["q_curr"]),
+                QF_MAX_KEY: torch.max(fwd_out["q_curr"]),
+                QF_MIN_KEY: torch.min(fwd_out["q_curr"]),
+                TD_ERROR_MEAN_KEY: torch.mean(td_error),
+            },
+            key=module_id,
+            window=1,  # <- single items (should not be mean/ema-reduced over time).
+        )
+        # If twin Q networks should be used, add a critic loss for the twin Q network.
+        # Note, we need this in the `self.compute_gradients()` to optimize.
+        if config.twin_q:
+            self.metrics.log_dict(
+                {
+                    QF_TWIN_LOSS_KEY: critic_twin_loss,
+                },
+                key=module_id,
+                window=1,  # <- single items (should not be mean/ema-reduced over time).
+            )
+
+        return total_loss
+
+    @override(DQNRainbowTorchLearner)
+    def compute_gradients(
+        self, loss_per_module: Dict[ModuleID, TensorType], **kwargs
+    ) -> ParamDict:
+        grads = {}
+        for module_id in set(loss_per_module.keys()) - {ALL_MODULES}:
+            # Loop through optimizers registered for this module.
+            for optim_name, optim in self.get_optimizers_for_module(module_id):
+                # Zero the gradients. Note, we need to reset the gradients b/c
+                # each component for a module operates on the same graph.
+                optim.zero_grad(set_to_none=True)
+
+                # Compute the gradients for the component and module.
+                self.metrics.peek((module_id, optim_name + "_loss")).backward(
+                    retain_graph=True
+                )
+                # Store the gradients for the component and module.
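+                # (`filter_param_dict_for_optimizer` restricts the global param
+                # dict to the params owned by this optimizer, so only those
+                # gradients are collected for the corresponding update.)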
+ grads.update( + { + pid: p.grad + for pid, p in self.filter_param_dict_for_optimizer( + self._params, optim + ).items() + } + ) + + return grads diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eef3dd16f42cf7a986b1aace13b2993a5f61d52e Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/dataset_reader.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/dataset_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d6692ab617be98c7b2a01ae41c24958a9e8c729 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/dataset_reader.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/resource.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/resource.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5d8f414972eed7de50b0a30b063c5c0645a53dc Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/__pycache__/resource.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/offline/estimators/__pycache__/importance_sampling.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/estimators/__pycache__/importance_sampling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f76ab37ba9cb3f520ee482b55389fcbf11b9158 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/estimators/__pycache__/importance_sampling.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/offline/json_reader.py b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/json_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..30562b515aac2562b33fbcb847720ee1f17c0131 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/offline/json_reader.py @@ -0,0 +1,438 @@ +import glob +import json +import logging +import math + +import numpy as np +import os +from pathlib import Path +import random +import re +import tree # pip install dm_tree +from typing import List, Optional, TYPE_CHECKING, Union +from urllib.parse import urlparse +import zipfile + +try: + from smart_open import smart_open +except ImportError: + smart_open = None + +from ray.rllib.offline.input_reader import InputReader +from ray.rllib.offline.io_context import IOContext +from ray.rllib.policy.policy import Policy +from ray.rllib.policy.sample_batch import ( + DEFAULT_POLICY_ID, + MultiAgentBatch, + SampleBatch, + concat_samples, + convert_ma_batch_to_sample_batch, +) +from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI +from ray.rllib.utils.compression import unpack_if_needed +from ray.rllib.utils.spaces.space_utils import clip_action, normalize_action +from ray.rllib.utils.typing import Any, FileType, SampleBatchType + +if TYPE_CHECKING: + from ray.rllib.evaluation import RolloutWorker + +logger = logging.getLogger(__name__) + +WINDOWS_DRIVES = [chr(i) for i in range(ord("c"), ord("z") + 1)] + + +def _adjust_obs_actions_for_policy(json_data: 
dict, policy: Policy) -> dict: + """Handle nested action/observation spaces for policies. + + Translates nested lists/dicts from the json into proper + np.ndarrays, according to the (nested) observation- and action- + spaces of the given policy. + + Providing nested lists w/o this preprocessing step would + confuse a SampleBatch constructor. + """ + for k, v in json_data.items(): + data_col = ( + policy.view_requirements[k].data_col + if k in policy.view_requirements + else "" + ) + # No action flattening -> Process nested (leaf) action(s). + if policy.config.get("_disable_action_flattening") and ( + k == SampleBatch.ACTIONS + or data_col == SampleBatch.ACTIONS + or k == SampleBatch.PREV_ACTIONS + or data_col == SampleBatch.PREV_ACTIONS + ): + json_data[k] = tree.map_structure_up_to( + policy.action_space_struct, + lambda comp: np.array(comp), + json_data[k], + check_types=False, + ) + # No preprocessing -> Process nested (leaf) observation(s). + elif policy.config.get("_disable_preprocessor_api") and ( + k == SampleBatch.OBS + or data_col == SampleBatch.OBS + or k == SampleBatch.NEXT_OBS + or data_col == SampleBatch.NEXT_OBS + ): + json_data[k] = tree.map_structure_up_to( + policy.observation_space_struct, + lambda comp: np.array(comp), + json_data[k], + check_types=False, + ) + return json_data + + +@DeveloperAPI +def _adjust_dones(json_data: dict) -> dict: + """Make sure DONES in json data is properly translated into TERMINATEDS.""" + new_json_data = {} + for k, v in json_data.items(): + # Translate DONES into TERMINATEDS. + if k == SampleBatch.DONES: + new_json_data[SampleBatch.TERMINATEDS] = v + # Leave everything else as-is. + else: + new_json_data[k] = v + + return new_json_data + + +@DeveloperAPI +def postprocess_actions(batch: SampleBatchType, ioctx: IOContext) -> SampleBatchType: + # Clip actions (from any values into env's bounds), if necessary. + cfg = ioctx.config + # TODO(jungong): We should not clip_action in input reader. + # Use connector to handle this. + if cfg.get("clip_actions"): + if ioctx.worker is None: + raise ValueError( + "clip_actions is True but cannot clip actions since no workers exist" + ) + + if isinstance(batch, SampleBatch): + policy = ioctx.worker.policy_map.get(DEFAULT_POLICY_ID) + if policy is None: + assert len(ioctx.worker.policy_map) == 1 + policy = next(iter(ioctx.worker.policy_map.values())) + batch[SampleBatch.ACTIONS] = clip_action( + batch[SampleBatch.ACTIONS], policy.action_space_struct + ) + else: + for pid, b in batch.policy_batches.items(): + b[SampleBatch.ACTIONS] = clip_action( + b[SampleBatch.ACTIONS], + ioctx.worker.policy_map[pid].action_space_struct, + ) + # Re-normalize actions (from env's bounds to zero-centered), if + # necessary. + if ( + cfg.get("actions_in_input_normalized") is False + and cfg.get("normalize_actions") is True + ): + if ioctx.worker is None: + raise ValueError( + "actions_in_input_normalized is False but" + "cannot normalize actions since no workers exist" + ) + + # If we have a complex action space and actions were flattened + # and we have to normalize -> Error. + error_msg = ( + "Normalization of offline actions that are flattened is not " + "supported! Make sure that you record actions into offline " + "file with the `_disable_action_flattening=True` flag OR " + "as already normalized (between -1.0 and 1.0) values. " + "Also, when reading already normalized action values from " + "offline files, make sure to set " + "`actions_in_input_normalized=True` so that RLlib will not " + "perform normalization on top." 
+ ) + + if isinstance(batch, SampleBatch): + policy = ioctx.worker.policy_map.get(DEFAULT_POLICY_ID) + if policy is None: + assert len(ioctx.worker.policy_map) == 1 + policy = next(iter(ioctx.worker.policy_map.values())) + if isinstance( + policy.action_space_struct, (tuple, dict) + ) and not policy.config.get("_disable_action_flattening"): + raise ValueError(error_msg) + batch[SampleBatch.ACTIONS] = normalize_action( + batch[SampleBatch.ACTIONS], policy.action_space_struct + ) + else: + for pid, b in batch.policy_batches.items(): + policy = ioctx.worker.policy_map[pid] + if isinstance( + policy.action_space_struct, (tuple, dict) + ) and not policy.config.get("_disable_action_flattening"): + raise ValueError(error_msg) + b[SampleBatch.ACTIONS] = normalize_action( + b[SampleBatch.ACTIONS], + ioctx.worker.policy_map[pid].action_space_struct, + ) + + return batch + + +@DeveloperAPI +def from_json_data(json_data: Any, worker: Optional["RolloutWorker"]): + # Try to infer the SampleBatchType (SampleBatch or MultiAgentBatch). + if "type" in json_data: + data_type = json_data.pop("type") + else: + raise ValueError("JSON record missing 'type' field") + + if data_type == "SampleBatch": + if worker is not None and len(worker.policy_map) != 1: + raise ValueError( + "Found single-agent SampleBatch in input file, but our " + "PolicyMap contains more than 1 policy!" + ) + for k, v in json_data.items(): + json_data[k] = unpack_if_needed(v) + if worker is not None: + policy = next(iter(worker.policy_map.values())) + json_data = _adjust_obs_actions_for_policy(json_data, policy) + json_data = _adjust_dones(json_data) + return SampleBatch(json_data) + elif data_type == "MultiAgentBatch": + policy_batches = {} + for policy_id, policy_batch in json_data["policy_batches"].items(): + inner = {} + for k, v in policy_batch.items(): + # Translate DONES into TERMINATEDS. + if k == SampleBatch.DONES: + k = SampleBatch.TERMINATEDS + inner[k] = unpack_if_needed(v) + if worker is not None: + policy = worker.policy_map[policy_id] + inner = _adjust_obs_actions_for_policy(inner, policy) + inner = _adjust_dones(inner) + policy_batches[policy_id] = SampleBatch(inner) + return MultiAgentBatch(policy_batches, json_data["count"]) + else: + raise ValueError( + "Type field must be one of ['SampleBatch', 'MultiAgentBatch']", data_type + ) + + +# TODO(jungong) : use DatasetReader to back JsonReader, so we reduce +# codebase complexity without losing existing functionality. +@PublicAPI +class JsonReader(InputReader): + """Reader object that loads experiences from JSON file chunks. + + The input files will be read from in random order. + """ + + @PublicAPI + def __init__( + self, inputs: Union[str, List[str]], ioctx: Optional[IOContext] = None + ): + """Initializes a JsonReader instance. + + Args: + inputs: Either a glob expression for files, e.g. `/tmp/**/*.json`, + or a list of single file paths or URIs, e.g., + ["s3://bucket/file.json", "s3://bucket/file2.json"]. + ioctx: Current IO context object or None. + """ + logger.info( + "You are using JSONReader. It is recommended to use " + + "DatasetReader instead for better sharding support." 
+ ) + + self.ioctx = ioctx or IOContext() + self.default_policy = self.policy_map = None + self.batch_size = 1 + if self.ioctx: + self.batch_size = self.ioctx.config.get("train_batch_size", 1) + num_workers = self.ioctx.config.get("num_env_runners", 0) + if num_workers: + self.batch_size = max(math.ceil(self.batch_size / num_workers), 1) + + if self.ioctx.worker is not None: + self.policy_map = self.ioctx.worker.policy_map + self.default_policy = self.policy_map.get(DEFAULT_POLICY_ID) + if self.default_policy is None: + assert len(self.policy_map) == 1 + self.default_policy = next(iter(self.policy_map.values())) + + if isinstance(inputs, str): + inputs = os.path.abspath(os.path.expanduser(inputs)) + if os.path.isdir(inputs): + inputs = [os.path.join(inputs, "*.json"), os.path.join(inputs, "*.zip")] + logger.warning(f"Treating input directory as glob patterns: {inputs}") + else: + inputs = [inputs] + + if any(urlparse(i).scheme not in [""] + WINDOWS_DRIVES for i in inputs): + raise ValueError( + "Don't know how to glob over `{}`, ".format(inputs) + + "please specify a list of files to read instead." + ) + else: + self.files = [] + for i in inputs: + self.files.extend(glob.glob(i)) + elif isinstance(inputs, (list, tuple)): + self.files = list(inputs) + else: + raise ValueError( + "type of inputs must be list or str, not {}".format(inputs) + ) + if self.files: + logger.info("Found {} input files.".format(len(self.files))) + else: + raise ValueError("No files found matching {}".format(inputs)) + self.cur_file = None + + @override(InputReader) + def next(self) -> SampleBatchType: + ret = [] + count = 0 + while count < self.batch_size: + batch = self._try_parse(self._next_line()) + tries = 0 + while not batch and tries < 100: + tries += 1 + logger.debug("Skipping empty line in {}".format(self.cur_file)) + batch = self._try_parse(self._next_line()) + if not batch: + raise ValueError( + "Failed to read valid experience batch from file: {}".format( + self.cur_file + ) + ) + batch = self._postprocess_if_needed(batch) + count += batch.count + ret.append(batch) + ret = concat_samples(ret) + return ret + + def read_all_files(self) -> SampleBatchType: + """Reads through all files and yields one SampleBatchType per line. + + When reaching the end of the last file, will start from the beginning + again. + + Yields: + One SampleBatch or MultiAgentBatch per line in all input files. + """ + for path in self.files: + file = self._try_open_file(path) + while True: + line = file.readline() + if not line: + break + batch = self._try_parse(line) + if batch is None: + break + yield batch + + def _postprocess_if_needed(self, batch: SampleBatchType) -> SampleBatchType: + if not self.ioctx.config.get("postprocess_inputs"): + return batch + + batch = convert_ma_batch_to_sample_batch(batch) + + if isinstance(batch, SampleBatch): + out = [] + for sub_batch in batch.split_by_episode(): + out.append(self.default_policy.postprocess_trajectory(sub_batch)) + return concat_samples(out) + else: + # TODO(ekl) this is trickier since the alignments between agent + # trajectories in the episode are not available any more. + raise NotImplementedError( + "Postprocessing of multi-agent data not implemented yet." + ) + + def _try_open_file(self, path): + if urlparse(path).scheme not in [""] + WINDOWS_DRIVES: + if smart_open is None: + raise ValueError( + "You must install the `smart_open` module to read " + "from URIs like {}".format(path) + ) + ctx = smart_open + else: + # Allow shortcut for home directory ("~/" -> env[HOME]). 
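+            # (Roughly what `os.path.expanduser(path)` would do, spelled out
+            # explicitly here.)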
+ if path.startswith("~/"): + path = os.path.join(os.environ.get("HOME", ""), path[2:]) + + # If path doesn't exist, try to interpret is as relative to the + # rllib directory (located ../../ from this very module). + path_orig = path + if not os.path.exists(path): + path = os.path.join(Path(__file__).parent.parent, path) + if not os.path.exists(path): + raise FileNotFoundError(f"Offline file {path_orig} not found!") + + # Unzip files, if necessary and re-point to extracted json file. + if re.search("\\.zip$", path): + with zipfile.ZipFile(path, "r") as zip_ref: + zip_ref.extractall(Path(path).parent) + path = re.sub("\\.zip$", ".json", path) + assert os.path.exists(path) + ctx = open + file = ctx(path, "r") + return file + + def _try_parse(self, line: str) -> Optional[SampleBatchType]: + line = line.strip() + if not line: + return None + try: + batch = self._from_json(line) + except Exception: + logger.exception( + "Ignoring corrupt json record in {}: {}".format(self.cur_file, line) + ) + return None + + batch = postprocess_actions(batch, self.ioctx) + + return batch + + def _next_line(self) -> str: + if not self.cur_file: + self.cur_file = self._next_file() + line = self.cur_file.readline() + tries = 0 + while not line and tries < 100: + tries += 1 + if hasattr(self.cur_file, "close"): # legacy smart_open impls + self.cur_file.close() + self.cur_file = self._next_file() + line = self.cur_file.readline() + if not line: + logger.debug("Ignoring empty file {}".format(self.cur_file)) + if not line: + raise ValueError( + "Failed to read next line from files: {}".format(self.files) + ) + return line + + def _next_file(self) -> FileType: + # If this is the first time, we open a file, make sure all workers + # start with a different one if possible. + if self.cur_file is None and self.ioctx.worker is not None: + idx = self.ioctx.worker.worker_index + total = self.ioctx.worker.num_workers or 1 + path = self.files[round((len(self.files) - 1) * (idx / total))] + # After the first file, pick all others randomly. + else: + path = random.choice(self.files) + return self._try_open_file(path) + + def _from_json(self, data: str) -> SampleBatchType: + if isinstance(data, bytes): # smart_open S3 doesn't respect "r" + data = data.decode("utf-8") + json_data = json.loads(data) + return from_json_data(json_data, self.ioctx.worker) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/tuned_examples/dreamerv3/flappy_bird.py b/deepseek/lib/python3.10/site-packages/ray/rllib/tuned_examples/dreamerv3/flappy_bird.py new file mode 100644 index 0000000000000000000000000000000000000000..31755b6dfe3c6f4ed3d1c6d8ed4068ed6012c237 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/tuned_examples/dreamerv3/flappy_bird.py @@ -0,0 +1,78 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf + +[2] Mastering Atari with Discrete World Models - 2021 +D. Hafner, T. Lillicrap, M. Norouzi, J. Ba +https://arxiv.org/pdf/2010.02193.pdf +""" + +# Run with: +# python run_regression_tests.py --dir [this file] + +from ray.rllib.algorithms.dreamerv3.dreamerv3 import DreamerV3Config +from ray import tune + + +# Number of GPUs to run on. +num_gpus = 0 + +# DreamerV3 config and default (1 GPU) learning rates. 
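+# The default rates are captured below (as `w` and `c`) so that the lr schedules
+# in the `.training()` call further down can be expressed as multiples of these
+# defaults.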
+config = DreamerV3Config() +w = config.world_model_lr +c = config.critic_lr + + +def _env_creator(ctx): + import flappy_bird_gymnasium # noqa doctest: +SKIP + import gymnasium as gym + from supersuit.generic_wrappers import resize_v1 + from ray.rllib.algorithms.dreamerv3.utils.env_runner import NormalizedImageEnv + + return NormalizedImageEnv( + resize_v1( # resize to 64x64 and normalize images + gym.make("FlappyBird-rgb-v0", audio_on=False), x_size=64, y_size=64 + ) + ) + + +# Register the FlappyBird-rgb-v0 env including necessary wrappers via the +# `tune.register_env()` API. +tune.register_env("flappy-bird", _env_creator) + +# Further specify the DreamerV3 config object to use. +( + config.environment("flappy-bird") + .resources( + num_cpus_for_main_process=1, + ) + .learners( + num_learners=0 if num_gpus == 1 else num_gpus, + num_gpus_per_learner=1 if num_gpus else 0, + ) + .env_runners( + # If we use >1 GPU and increase the batch size accordingly, we should also + # increase the number of envs per worker. + num_envs_per_env_runner=8 * (num_gpus or 1), + remote_worker_envs=True, + ) + .reporting( + metrics_num_episodes_for_smoothing=(num_gpus or 1), + report_images_and_videos=False, + report_dream_data=False, + report_individual_batch_item_stats=False, + ) + # See Appendix A. + .training( + model_size="M", + training_ratio=64, + batch_size_B=16 * (num_gpus or 1), + # Use a well established 4-GPU lr scheduling recipe: + # ~ 1000 training updates with 0.4x[default rates], then over a few hundred + # steps, increase to 4x[default rates]. + world_model_lr=[[0, 0.4 * w], [8000, 0.4 * w], [10000, 3 * w]], + critic_lr=[[0, 0.4 * c], [8000, 0.4 * c], [10000, 3 * c]], + actor_lr=[[0, 0.4 * c], [8000, 0.4 * c], [10000, 3 * c]], + ) +) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/tuned_examples/dreamerv3/pendulum.py b/deepseek/lib/python3.10/site-packages/ray/rllib/tuned_examples/dreamerv3/pendulum.py new file mode 100644 index 0000000000000000000000000000000000000000..4acc4b9aa85a9386286e7f1fef1400b5b5fcbf4b --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/tuned_examples/dreamerv3/pendulum.py @@ -0,0 +1,19 @@ +""" +[1] Mastering Diverse Domains through World Models - 2023 +D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap +https://arxiv.org/pdf/2301.04104v1.pdf + +[2] Mastering Atari with Discrete World Models - 2021 +D. Hafner, T. Lillicrap, M. Norouzi, J. Ba +https://arxiv.org/pdf/2010.02193.pdf +""" +from ray.rllib.algorithms.dreamerv3.dreamerv3 import DreamerV3Config + +# Run with: +# python run_regression_tests.py --dir [this file] + +config = ( + DreamerV3Config() + .environment("Pendulum-v1") + .training(model_size="XS", training_ratio=1024) +)
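+
+# A minimal usage sketch (not part of the regression-test flow mentioned above;
+# the number of iterations is an arbitrary, illustrative choice):
+#
+#     algo = config.build()
+#     for _ in range(5):
+#         print(algo.train())
+#     algo.stop()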