from typing import Optional, Union, List
from easydict import EasyDict
from ding.utils import deep_merge_dicts, SequenceType
import numpy as np
import torch
class LevelSampler:
"""
Overview:
Policy class of Prioritized Level Replay algorithm.
https://arxiv.org/pdf/2010.03934.pdf
PLR is a method for improving generalization and sample-efficiency of \
deep RL agents on procedurally-generated environments by adaptively updating \
a sampling distribution over the training levels based on a score of the learning \
potential of replaying each level.
"""
config = dict(
strategy='policy_entropy',
replay_schedule='fixed',
score_transform='rank',
temperature=1.0,
eps=0.05,
rho=0.2,
nu=0.5,
alpha=1.0,
staleness_coef=0,
staleness_transform='power',
staleness_temperature=1.0,
)
def __init__(
self,
        seeds: List[int],
obs_space: Union[int, SequenceType],
action_space: int,
num_actors: int,
cfg: EasyDict,
):
        self.cfg = EasyDict(deep_merge_dicts(self.config, cfg))  # user cfg entries override the class defaults
self.obs_space = obs_space
self.action_space = action_space
self.strategy = self.cfg.strategy
self.replay_schedule = self.cfg.replay_schedule
self.score_transform = self.cfg.score_transform
self.temperature = self.cfg.temperature
        # eps is the level-replay epsilon for eps-greedy sampling
        self.eps = self.cfg.eps
        # rho is the minimum fraction of levels that must be seen before replay levels are sampled
        self.rho = self.cfg.rho
        # nu is the probability of sampling a new (unseen) level instead of a replay level
        self.nu = self.cfg.nu
        # alpha is the exponentially-weighted moving average (EWMA) smoothing factor for level scores
        self.alpha = self.cfg.alpha
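        # staleness_coef mixes a staleness distribution into the sampling weights so stale levels are eventually revisited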
self.staleness_coef = self.cfg.staleness_coef
self.staleness_transform = self.cfg.staleness_transform
self.staleness_temperature = self.cfg.staleness_temperature
        # Track seeds and their scores in numpy arrays
self.seeds = np.array(seeds, dtype=np.int64)
self.seed2index = {seed: i for i, seed in enumerate(seeds)}
self.unseen_seed_weights = np.ones(len(seeds))
self.seed_scores = np.zeros(len(seeds))
self.partial_seed_scores = np.zeros((num_actors, len(seeds)), dtype=np.float32)
self.partial_seed_steps = np.zeros((num_actors, len(seeds)), dtype=np.int64)
self.seed_staleness = np.zeros(len(seeds))
self.next_seed_index = 0 # Only used for sequential strategy
def update_with_rollouts(self, train_data: dict, num_actors: int):
total_steps = train_data['reward'].shape[0]
if self.strategy == 'random':
return
if self.strategy == 'policy_entropy':
score_function = self._entropy
elif self.strategy == 'least_confidence':
score_function = self._least_confidence
elif self.strategy == 'min_margin':
score_function = self._min_margin
elif self.strategy == 'gae':
score_function = self._gae
elif self.strategy == 'value_l1':
score_function = self._value_l1
elif self.strategy == 'one_step_td_error':
score_function = self._one_step_td_error
else:
raise ValueError('Not supported strategy: {}'.format(self.strategy))
self._update_with_rollouts(train_data, num_actors, total_steps, score_function)
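        # Flush partially-accumulated episode scores so every partially-visited seed receives a full EWMA update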
for actor_index in range(self.partial_seed_scores.shape[0]):
for seed_idx in range(self.partial_seed_scores.shape[1]):
if self.partial_seed_scores[actor_index][seed_idx] != 0:
self.update_seed_score(actor_index, seed_idx, 0, 0)
self.partial_seed_scores.fill(0)
self.partial_seed_steps.fill(0)
def update_seed_score(self, actor_index: int, seed_idx: int, score: float, num_steps: int):
score = self._partial_update_seed_score(actor_index, seed_idx, score, num_steps, done=True)
self.unseen_seed_weights[seed_idx] = 0. # No longer unseen
old_score = self.seed_scores[seed_idx]
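        # Exponentially-weighted moving average: blend the old score with the new one using alpha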
self.seed_scores[seed_idx] = (1 - self.alpha) * old_score + self.alpha * score
def _partial_update_seed_score(
self, actor_index: int, seed_idx: int, score: float, num_steps: int, done: bool = False
):
partial_score = self.partial_seed_scores[actor_index][seed_idx]
partial_num_steps = self.partial_seed_steps[actor_index][seed_idx]
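        # Merge the new score into the running episode average, weighted by the number of steps it covers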
running_num_steps = partial_num_steps + num_steps
merged_score = partial_score + (score - partial_score) * num_steps / float(running_num_steps)
if done:
self.partial_seed_scores[actor_index][seed_idx] = 0. # zero partial score, partial num_steps
self.partial_seed_steps[actor_index][seed_idx] = 0
else:
self.partial_seed_scores[actor_index][seed_idx] = merged_score
self.partial_seed_steps[actor_index][seed_idx] = running_num_steps
return merged_score
def _entropy(self, **kwargs):
episode_logits = kwargs['episode_logits']
num_actions = self.action_space
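        # The maximum entropy of a categorical policy over num_actions actions is log(num_actions); used for normalization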
max_entropy = -(1. / num_actions) * np.log(1. / num_actions) * num_actions
return (-torch.exp(episode_logits) * episode_logits).sum(-1).mean().item() / max_entropy
def _least_confidence(self, **kwargs):
episode_logits = kwargs['episode_logits']
return (1 - torch.exp(episode_logits.max(-1, keepdim=True)[0])).mean().item()
def _min_margin(self, **kwargs):
episode_logits = kwargs['episode_logits']
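        # Margin between the two most probable actions: a small margin means low confidence, hence a high score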
top2_confidence = torch.exp(episode_logits.topk(2, dim=-1)[0])
return 1 - (top2_confidence[:, 0] - top2_confidence[:, 1]).mean().item()
def _gae(self, **kwargs):
advantages = kwargs['adv']
return advantages.mean().item()
def _value_l1(self, **kwargs):
advantages = kwargs['adv']
        # A large mean absolute advantage indicates an inaccurate value estimate on this level,
        # so replaying it has high learning potential
        return advantages.abs().mean().item()
def _one_step_td_error(self, **kwargs):
rewards = kwargs['rewards']
value = kwargs['value']
max_t = len(rewards)
        # One-step TD errors; note that no discount factor is applied here
        td_errors = (rewards[:-1] + value[:max_t - 1] - value[1:max_t]).abs()
        return td_errors.mean().item()
    def _update_with_rollouts(self, train_data: dict, num_actors: int, all_total_steps: int, score_function):
        # Reshape the flat rollout data to (steps_per_actor, num_actors)
        steps_per_actor = all_total_steps // num_actors
        level_seeds = train_data['seed'].reshape(num_actors, steps_per_actor).transpose(0, 1)
        policy_logits = train_data['logit'].reshape(num_actors, steps_per_actor, -1).transpose(0, 1)
        done = train_data['done'].reshape(num_actors, steps_per_actor).transpose(0, 1)
        total_steps, num_actors = policy_logits.shape[:2]
for actor_index in range(num_actors):
done_steps = done[:, actor_index].nonzero()[:total_steps, 0]
start_t = 0
for t in done_steps:
                if start_t >= total_steps:
                    break
                if t == 0:  # this done step already triggered a full update of the previous seed in the last cycle
                    continue
                seed_t = int(level_seeds[start_t, actor_index].item())
seed_idx_t = self.seed2index[seed_t]
score_function_kwargs = {}
episode_logits = policy_logits[start_t:t, actor_index]
score_function_kwargs['episode_logits'] = torch.log_softmax(episode_logits, -1)
if self.strategy in ['gae', 'value_l1', 'one_step_td_error']:
                    rewards = train_data['reward'].reshape(num_actors, steps_per_actor).transpose(0, 1)
                    adv = train_data['adv'].reshape(num_actors, steps_per_actor).transpose(0, 1)
                    value = train_data['value'].reshape(num_actors, steps_per_actor).transpose(0, 1)
score_function_kwargs['adv'] = adv[start_t:t, actor_index]
score_function_kwargs['rewards'] = rewards[start_t:t, actor_index]
score_function_kwargs['value'] = value[start_t:t, actor_index]
score = score_function(**score_function_kwargs)
num_steps = len(episode_logits)
self.update_seed_score(actor_index, seed_idx_t, score, num_steps)
start_t = t.item()
if start_t < total_steps:
seed_t = level_seeds[start_t, actor_index].item()
seed_idx_t = self.seed2index[seed_t]
score_function_kwargs = {}
episode_logits = policy_logits[start_t:, actor_index]
score_function_kwargs['episode_logits'] = torch.log_softmax(episode_logits, -1)
if self.strategy in ['gae', 'value_l1', 'one_step_td_error']:
                    rewards = train_data['reward'].reshape(num_actors, steps_per_actor).transpose(0, 1)
                    adv = train_data['adv'].reshape(num_actors, steps_per_actor).transpose(0, 1)
                    value = train_data['value'].reshape(num_actors, steps_per_actor).transpose(0, 1)
score_function_kwargs['adv'] = adv[start_t:, actor_index]
score_function_kwargs['rewards'] = rewards[start_t:, actor_index]
score_function_kwargs['value'] = value[start_t:, actor_index]
score = score_function(**score_function_kwargs)
num_steps = len(episode_logits)
self._partial_update_seed_score(actor_index, seed_idx_t, score, num_steps)
def _update_staleness(self, selected_idx: int):
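        # Every level grows one unit staler each sampling round; the level just selected is reset to zero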
if self.staleness_coef > 0:
self.seed_staleness += 1
self.seed_staleness[selected_idx] = 0
def _sample_replay_level(self):
sample_weights = self._sample_weights()
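        # Fall back to uniform sampling if all replay weights are (numerically) zero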
if np.isclose(np.sum(sample_weights), 0):
sample_weights = np.ones_like(sample_weights, dtype=np.float32) / len(sample_weights)
seed_idx = np.random.choice(range(len(self.seeds)), 1, p=sample_weights)[0]
seed = self.seeds[seed_idx]
self._update_staleness(seed_idx)
return int(seed)
def _sample_unseen_level(self):
sample_weights = self.unseen_seed_weights / self.unseen_seed_weights.sum()
seed_idx = np.random.choice(range(len(self.seeds)), 1, p=sample_weights)[0]
seed = self.seeds[seed_idx]
self._update_staleness(seed_idx)
return int(seed)
def sample(self, strategy: Optional[str] = None):
if not strategy:
strategy = self.strategy
if strategy == 'random':
seed_idx = np.random.choice(range(len(self.seeds)))
seed = self.seeds[seed_idx]
return int(seed)
elif strategy == 'sequential':
seed_idx = self.next_seed_index
self.next_seed_index = (self.next_seed_index + 1) % len(self.seeds)
seed = self.seeds[seed_idx]
return int(seed)
num_unseen = (self.unseen_seed_weights > 0).sum()
proportion_seen = (len(self.seeds) - num_unseen) / len(self.seeds)
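        # Replay begins only once at least a fraction rho of all levels has been seen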
if self.replay_schedule == 'fixed':
if proportion_seen >= self.rho:
                # Sample a replay level with probability 1 - nu, or always once every level has been seen
                if np.random.rand() > self.nu or proportion_seen >= 1.0:
return self._sample_replay_level()
# Otherwise, sample a new level
return self._sample_unseen_level()
else: # Default to proportionate schedule
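            # Replay with probability equal to the fraction of levels already seen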
if proportion_seen >= self.rho and np.random.rand() < proportion_seen:
return self._sample_replay_level()
else:
return self._sample_unseen_level()
def _sample_weights(self):
weights = self._score_transform(self.score_transform, self.temperature, self.seed_scores)
weights = weights * (1 - self.unseen_seed_weights) # zero out unseen levels
z = np.sum(weights)
if z > 0:
weights /= z
staleness_weights = 0
if self.staleness_coef > 0:
staleness_weights = self._score_transform(
self.staleness_transform, self.staleness_temperature, self.seed_staleness
)
staleness_weights = staleness_weights * (1 - self.unseen_seed_weights)
z = np.sum(staleness_weights)
if z > 0:
staleness_weights /= z
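            # Mix score-based and staleness-based weights into a single sampling distribution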
weights = (1 - self.staleness_coef) * weights + self.staleness_coef * staleness_weights
return weights
    def _score_transform(self, transform: str, temperature: float, scores: np.ndarray) -> np.ndarray:
if transform == 'rank':
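            # Rank transform: the weight of each level is 1 / rank ** (1 / temperature), rank 1 being the highest score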
temp = np.flip(scores.argsort())
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(temp)) + 1
weights = 1 / ranks ** (1. / temperature)
        elif transform == 'power':
            eps = 0 if self.staleness_coef > 0 else 1e-3
            weights = (np.array(scores) + eps) ** (1. / temperature)
        else:
            raise ValueError('Not supported score transform: {}'.format(transform))
        return weights
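
# A minimal usage sketch (an illustrative addition, not part of the module itself):
# the seed list, space sizes, actor count, and config below are hypothetical values.
if __name__ == '__main__':
    demo_cfg = EasyDict(dict(strategy='random'))
    sampler = LevelSampler(seeds=list(range(10)), obs_space=4, action_space=3, num_actors=2, cfg=demo_cfg)
    # Draw a handful of training seeds from the current sampling distribution
    for _ in range(5):
        print(sampler.sample())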