Commit 367577f · committed by delinqu · verified · Parent(s): ad6d8e2

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
action_tokenizer.py ADDED
@@ -0,0 +1,446 @@
+ # MIT License
+ # Copyright (c) 2025 IPEC at Shanghai AI Laboratory
+ # Permission is hereby granted, free of charge, to use, copy, modify, merge, publish,
+ # distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
+ # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
+ # coding=utf-8
+
+ """
+ action_tokenizer.py
+
+ Extension class; wraps base LLM/VLM tokenizer with logic to discretize and tokenize continuous robot actions.
+ """
+ from typing import List, Union, Dict, Tuple, Optional
+ import numpy as np
+ from transformers import PreTrainedTokenizerBase
+ from pathlib import Path
+ import json
+ from scipy.stats import norm
+ import torch
+
+ ACTION_TOKEN = '<ACTION{:05d}>'
+
+ """Spatial Tokenizer"""
+ class ActionTokenizer:
+     def __init__(
+         self,
+         tokenizer: PreTrainedTokenizerBase,
+         num_bins: int = 256,
+         min_action: int = -1,
+         max_action: int = 1,
+     ):
+         self._vocab_size = num_bins
+         self.tokenizer = tokenizer
+         self.min_action, self.max_action = min_action, max_action
+         self.bin_centers = np.linspace(min_action, max_action, num_bins)
+
+         # add special action tokens to language tokenizer
+         token_list = [ACTION_TOKEN.format(i) for i in range(self._vocab_size)]
+         self.token_array = np.array(token_list)
+
+         num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+         print(f"Add {num_new_tokens} ACTION TOKENS, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+         self.action_token_begin_idx = self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+         self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+
+     def __call__(self, action: np.ndarray) -> List[str]:
+         """Discretize continuous actions to tokens.
+         action: np.ndarray, (n, 7), continuous actions in Cartesian or Spherical coordinates.
+         return: np.ndarray, (n, 7), tokens.
+         """
+         action = np.clip(action, a_min=float(self.min_action), a_max=float(self.max_action))
+         ids = np.digitize(action, self.bin_centers, right=True)  # [0, 255]
+         return self.token_array[ids]
+
+     def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
+         """decode token ids to continuous actions.
+         action_token_id: np.ndarray, (n, 7), token ids.
+         return: np.ndarray, (n, 7), continuous actions
+         """
+         ids = action_token_id - self.action_token_begin_idx
+         ids = np.clip(ids, a_min=0, a_max=self._vocab_size - 1)
+         return self.bin_centers[ids]
+
+     @property
+     def vocab_size(self) -> int:
+         return self._vocab_size
+
+ """Spatial Tokenizer"""
+ class TranslationTokenizer:
+     def __init__(
+         self,
+         tokenizer: PreTrainedTokenizerBase,
+         num_bins: Dict,
+         bin_policy: Optional[Dict] = None,
+         use_spherical: bool = True,
+     ):
+         self.tokenizer = tokenizer
+         self.num_theta_bins = num_bins["theta_bins"]
+         self.num_phi_bins = num_bins["phi_bins"]
+         self.num_r_bins = num_bins["r_bins"]
+         self.use_spherical = use_spherical
+
+         # for indexing
+         self.NP = self.num_phi_bins * self.num_r_bins
+
+         # add special action tokens to language tokenizer
+         self._vocab_size = self.num_theta_bins * self.num_phi_bins * self.num_r_bins
+         token_list = [ACTION_TOKEN.format(i) for i in range(self._vocab_size)]
+         self.token_array = np.array(token_list)
+
+         num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+         print(f"Add {num_new_tokens} TRANSLATION TOKENS, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+         self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+         self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+         self.set_bins(bin_policy)
+
+     def set_bins(self, bin_policy):
+         self.theta_bins = np.array(bin_policy["theta_bins"])
+         self.phi_bins = np.array(bin_policy["phi_bins"])
+         self.r_bins = np.array(bin_policy["r_bins"])
+
+     def cartesian_to_spherical(self, x, y, z):
+         theta = np.arctan2(np.sqrt(x**2 + y**2), z)  # polar angle
+         phi = np.arctan2(y, x)  # azimuthal angle
+         r = np.sqrt(x**2 + y**2 + z**2)
+         return theta, phi, r
+
+     def spherical_to_cartesian(self, theta, phi, r):
+         x = r * np.sin(theta) * np.cos(phi)
+         y = r * np.sin(theta) * np.sin(phi)
+         z = r * np.cos(theta)
+         return x, y, z
+
+     def __call__(self, action: np.ndarray) -> List[str]:
+         """Discretize continuous actions to tokens.
+         action: np.ndarray, (n, 3), continuous actions in Cartesian or Spherical coordinates.
+         return: np.ndarray, (n,), tokens.
+         """
+         if self.use_spherical:
+             theta, phi, r = self.cartesian_to_spherical(action[:, 0], action[:, 1], action[:, 2])
+         else:
+             theta, phi, r = action[:, 0], action[:, 1], action[:, 2]
+
+         disc_theta = np.digitize(theta, self.theta_bins[1:-1])
+         disc_phi = np.digitize(phi, self.phi_bins[1:-1])
+         disc_r = np.digitize(r, self.r_bins[1:-1])
+         ids = disc_theta * self.NP + disc_phi * self.num_r_bins + disc_r
+         return self.token_array[ids]
+
+     def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
+         """decode token ids to continuous actions.
+         action_token_id: np.ndarray, (n,), token ids.
+         return: np.ndarray, (n, 3), continuous actions
+         """
+         action_token_id = np.clip(action_token_id, self.token_start_idx, self.token_end_idx)
+         ids = action_token_id - self.token_start_idx
+         disc_theta, disc_phi, disc_r = ids // self.NP, (ids % self.NP) // self.num_r_bins, ids % self.num_r_bins
+
+         theta = 0.5 * (self.theta_bins[disc_theta] + self.theta_bins[disc_theta + 1])
+         phi = 0.5 * (self.phi_bins[disc_phi] + self.phi_bins[disc_phi + 1])
+         r = 0.5 * (self.r_bins[disc_r] + self.r_bins[disc_r + 1])
+
+         # clip actions to [-1, 1], because the spherical-coordinate action space is the circumscribed sphere of the Cartesian action space.
+         x, y, z = self.spherical_to_cartesian(theta, phi, r) if self.use_spherical else (theta, phi, r)
+         x, y, z = np.clip([x, y, z], -1, 1)
+         return np.stack((x, y, z), axis=1)
+
+     @property
+     def vocab_size(self) -> int:
+         return self._vocab_size
+
+ class RotationTokenizer:
+     def __init__(
+         self,
+         tokenizer: PreTrainedTokenizerBase,
+         num_bins: Dict,
+         bin_policy: Optional[Dict] = None,
+         array_begin_idx=None,
+     ):
+         self.tokenizer = tokenizer
+         self.num_roll_bins = num_bins["roll_bins"]  # M
+         self.num_pitch_bins = num_bins["pitch_bins"]  # N
+         self.num_yaw_bins = num_bins["yaw_bins"]  # P
+         self.array_begin_idx = array_begin_idx
+
+         # for indexing
+         self.NP = self.num_pitch_bins * self.num_yaw_bins
+
+         # add special action tokens to language tokenizer
+         self._vocab_size = self.num_roll_bins * self.num_pitch_bins * self.num_yaw_bins
+         token_list = [ACTION_TOKEN.format(i + self.array_begin_idx) for i in range(self._vocab_size)]
+         self.token_array = np.array(token_list)
+
+         num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+         print(f"Add {num_new_tokens} ROTATION TOKENS to tokenizer, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+         self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+         self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+         self.set_bins(bin_policy)
+
+     def set_bins(self, bin_policy):
+         self.roll_bins = np.array(bin_policy["roll_bins"])
+         self.pitch_bins = np.array(bin_policy["pitch_bins"])
+         self.yaw_bins = np.array(bin_policy["yaw_bins"])
+
+     def __call__(self, action: np.ndarray) -> List[str]:
+         """Discretize continuous actions to tokens.
+         action: np.ndarray, (n, 3), continuous rotation actions (roll, pitch, yaw).
+         return: np.ndarray, (n,), tokens.
+         """
+         roll, pitch, yaw = action[:, 0], action[:, 1], action[:, 2]
+         disc_roll = np.clip(np.digitize(roll, self.roll_bins) - 1, 0, self.num_roll_bins - 1)
+         disc_pitch = np.clip(np.digitize(pitch, self.pitch_bins) - 1, 0, self.num_pitch_bins - 1)
+         disc_yaw = np.clip(np.digitize(yaw, self.yaw_bins) - 1, 0, self.num_yaw_bins - 1)
+
+         ids = disc_roll * self.NP + disc_pitch * self.num_yaw_bins + disc_yaw
+         return self.token_array[ids]
+
+     def decode_token_ids_to_actions(self, action_token_id: Union[np.int64, np.ndarray]) -> np.ndarray:
+         """decode token ids to continuous actions.
+         action_token_id: np.ndarray, (n,), token ids.
+         return: np.ndarray, (n, 3), continuous actions
+         """
+         action_token_id = np.clip(action_token_id, a_min=self.token_start_idx, a_max=self.token_end_idx)
+         ids = action_token_id - self.token_start_idx
+         disc_roll, disc_pitch, disc_yaw = ids // self.NP, (ids % self.NP) // self.num_yaw_bins, ids % self.num_yaw_bins
+
+         roll = 0.5 * (self.roll_bins[disc_roll] + self.roll_bins[disc_roll + 1])
+         pitch = 0.5 * (self.pitch_bins[disc_pitch] + self.pitch_bins[disc_pitch + 1])
+         yaw = 0.5 * (self.yaw_bins[disc_yaw] + self.yaw_bins[disc_yaw + 1])
+         return np.stack((roll, pitch, yaw), axis=1)
+
+     @property
+     def vocab_size(self) -> int:
+         return self._vocab_size
+
+ class GripperTokenzier:
+     def __init__(
+         self,
+         tokenizer: PreTrainedTokenizerBase,
+         num_bins: int = 2,
+         array_begin_idx = None,
+     ) -> None:
+         self.tokenizer = tokenizer
+         self.num_bins = num_bins
+         self.array_begin_idx = array_begin_idx
+         token_list = [ACTION_TOKEN.format(i + self.array_begin_idx) for i in range(self.num_bins)]
+         self.token_array = np.array(token_list)
+
+         num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+         print(f"Add {num_new_tokens} GRIPPER TOKENS to tokenizer, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+         self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+         self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+
+     def __call__(self, action: np.ndarray) -> List[str]:
+         """Discretize continuous actions to tokens.
+         action: np.ndarray, (n,), continuous gripper actions.
+         return: np.ndarray, (n,), tokens.
+         """
+         ids = np.where(action >= 0.5, 1, 0)
+         return self.token_array[ids]
+
+     def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
+         """decode token ids to continuous actions.
+         action_token_id: np.ndarray, (n,), token ids.
+         return: np.ndarray, (n, 1), continuous actions
+         """
+         action_token_id = np.clip(action_token_id, self.token_start_idx, self.token_end_idx)
+         ids = action_token_id - self.token_start_idx
+         actions = np.where(ids == 0, 0., 1.)
+         return actions[:, None]
+
+     @property
+     def vocab_size(self) -> int:
+         return self.num_bins
+
+ class SphericalCoordinateActionTokenizer:
+     range_bins = {
+         "translation": {
+             "theta_bins": (0.0, np.pi),
+             "phi_bins": (-np.pi, np.pi),
+             "r_bins": (0.0, np.sqrt(3)),
+         },
+         "rotation": {
+             "roll_bins": (-1.0, 1.0),
+             "pitch_bins": (-1.0, 1.0),
+             "yaw_bins": (-1.0, 1.0),
+         },
+     }
+     def __init__(
+         self,
+         tokenizer: PreTrainedTokenizerBase,
+         num_bins: Dict,
+         gs_params: Dict = None,
+         bin_policy: Dict = None,
+         use_spherical: bool = True,
+         min_sigma: float = 0.0,
+         min_action: float = -1.0,
+         max_action: float = 1.0,
+     ):
+         """Use bin_policy if provided; otherwise calculate it from gs_params (uniform bins if gs_params is None).
+         gs_params: Optional[Dict],
+         bin_policy: Optional[Dict],
+         """
+         self.tokenizer = tokenizer
+         self.min_action, self.max_action = min_action, max_action
+         self.num_bins = num_bins
+         self.min_sigma = min_sigma
+
+         # set bin policy
+         self.bin_policy = bin_policy if bin_policy else self.get_bin_policy(gs_params, self.min_sigma)
+
+         self.translation_tokenizer = TranslationTokenizer(
+             self.tokenizer,
+             self.num_bins["translation"],
+             self.bin_policy["translation"],
+             use_spherical=use_spherical
+         )
+
+         self.rotation_tokenizer = RotationTokenizer(
+             self.tokenizer,
+             self.num_bins["rotation"],
+             self.bin_policy["rotation"],
+             array_begin_idx=self.translation_tokenizer.vocab_size,
+         )
+
+         self.gripper_tokenizer = GripperTokenzier(
+             self.tokenizer,
+             self.num_bins["gripper"],
+             array_begin_idx=self.translation_tokenizer.vocab_size + self.rotation_tokenizer.vocab_size
+         )
+         self._vocab_size = self.translation_tokenizer.vocab_size + self.rotation_tokenizer.vocab_size + self.gripper_tokenizer.vocab_size
+
+     def __call__(self, action: np.ndarray) -> List[str]:
+         """Discretize continuous actions to tokens.
+         action: np.ndarray, (n, 7), continuous actions in Cartesian coordinates.
+         return: np.ndarray, (n, 3), tokens.
+         """
+         if len(action.shape) == 1:
+             assert action.shape[0] == 7, f"action dim mismatch, got action shape: {action.shape}"
+             action = action.reshape(1, 7)
+         assert action.shape[1] == 7, f"action dim mismatch, got action shape: {action.shape}"
+
+         action = np.clip(action, a_min=self.min_action, a_max=self.max_action)
+         trans_tokens = self.translation_tokenizer(action[:, :3])  # (n,)
+         rot_tokens = self.rotation_tokenizer(action[:, 3:6])  # (n,)
+         grip_tokens = self.gripper_tokenizer(action[:, 6])  # (n,)
+         return np.stack((trans_tokens, rot_tokens, grip_tokens), axis=1)  # (n, 3)
+
+     def decode_token_ids_to_actions(self, action_token_ids: np.ndarray) -> np.ndarray:
+         """decode token ids to continuous actions.
+         action_token_ids: np.ndarray, (n, 3), token ids.
+         """
+         if len(action_token_ids.shape) == 1:
+             assert action_token_ids.shape[0] == 3, f"action token id numbers mismatch, need 3 got {action_token_ids.shape[0]}"
+             action_token_ids = action_token_ids.reshape(1, 3)
+         assert action_token_ids.shape[1] == 3, f"token id numbers mismatch, need 3 got {action_token_ids.shape[1]}"
+
+         trans_action = self.translation_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 0])  # (n, 3)
+         rot_action = self.rotation_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 1])  # (n, 3)
+         grip_action = self.gripper_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 2])  # (n, 1)
+         return np.concatenate((trans_action, rot_action, grip_action), axis=1)  # (n, 7)
+
+     @property
+     def vocab_size(self) -> int:
+         return self._vocab_size
+
+     @property
+     def action_token_begin_idx(self) -> int:
+         return self.translation_tokenizer.token_start_idx
+
+     def get_bin_policy(self, gs_params=None, min_sigma=0.0):
+         bin_policy = {
+             "translation": {"theta_bins": None, "phi_bins": None, "r_bins": None},
+             "rotation": {"roll_bins": None, "pitch_bins": None, "yaw_bins": None}
+         }
+         if gs_params is None:
+             for bin_type in self.range_bins.keys():
+                 for bin_key in self.range_bins[bin_type].keys():
+                     bin_policy[bin_type][bin_key] = np.linspace(*self.range_bins[bin_type][bin_key], self.num_bins[bin_type][bin_key] + 1)
+             print(f"use uniform bin grids ... \n{bin_policy}")
+         else:
+             for bin_type in self.range_bins.keys():
+                 for bin_key in self.range_bins[bin_type].keys():
+                     mu = gs_params[bin_key.split("_")[0].lower()]["mu"]
+                     sigma = max(gs_params[bin_key.split("_")[0].lower()]["sigma"], min_sigma)
+                     bin_bound_prob = np.linspace(
+                         norm.cdf(self.range_bins[bin_type][bin_key][0], loc=mu, scale=sigma),
+                         norm.cdf(self.range_bins[bin_type][bin_key][1], loc=mu, scale=sigma),
+                         self.num_bins[bin_type][bin_key] + 1,
+                     )
+                     bin_boundary = norm.ppf(bin_bound_prob, loc=mu, scale=sigma)
+                     bin_policy[bin_type][bin_key] = np.clip(
+                         bin_boundary,
+                         self.range_bins[bin_type][bin_key][0],
+                         self.range_bins[bin_type][bin_key][1],
+                     ).tolist()  # for serialization
+             print(f"calculate bin grids from Gaussians \n{bin_policy}")
+         return bin_policy
+
+     def get_norm_meshgrid(self, bin_policy):
+         grids = []
+         policy = {k1: {k2: np.array(v2) for k2, v2 in v1.items()} for k1, v1 in bin_policy.items()}
+         # NOTE: use the unified key order of range_bins (tpr, rpy)
+         for bin_type in self.range_bins.keys():
+             bounds = []
+             for bin_key in self.range_bins[bin_type].keys():
+                 minb, maxb = self.range_bins[bin_type][bin_key][0], self.range_bins[bin_type][bin_key][1]
+                 bin_boundary = policy[bin_type][bin_key]
+                 bin_center = (bin_boundary[:-1] + bin_boundary[1:]) / 2
+                 bin_center = np.concatenate([np.array([minb]), bin_center, np.array([maxb])])  # padding
+                 bin_center = (bin_center - minb) / (maxb - minb)  # normalize (m, n, k)
+                 bounds.append(bin_center)
+             # generate grids
+             grid_x, grid_y, grid_z = np.meshgrid(*bounds)
+             grids += [np.stack([grid_x, grid_y, grid_z], -1).reshape(-1, 3)]
+         return grids[0], grids[1]  # (N, 3)
+
+     def spatial_embedding_adaption(self, gs_params, embeddings: torch.nn.Embedding, min_sigma=0.0, adpt_feature=False):
+         """
+         gs_params: Dict
+         embeddings: torch.nn.Embedding, weight of shape (S, E)
+         """
+         from scipy.interpolate import griddata
+         # __import__("ipdb").set_trace()
+
+         new_policy = self.get_bin_policy(gs_params, min_sigma=min_sigma)
+         trans_grids0, rot_grids0 = self.get_norm_meshgrid(self.bin_policy)
+         trans_grids1, rot_grids1 = self.get_norm_meshgrid(new_policy)
+
+         print("🔥 overwrite bin policy and tokenizer bins ...")
+         self.bin_policy = new_policy
+         self.min_sigma = min_sigma
+         self.translation_tokenizer.set_bins(new_policy["translation"])
+         self.rotation_tokenizer.set_bins(new_policy["rotation"])
+
+         if adpt_feature:
+             emb_data = embeddings.weight.data  # (S, e)
+             _, E = emb_data.shape
+
+             # translation
+             m, n, k = (self.num_bins["translation"][k] for k in ["theta_bins", "phi_bins", "r_bins"])
+             N = m*n*k
+             trans_emb_data = emb_data[:N,].reshape(m, n, k, -1).permute(3, 0, 1, 2)  # (e, m, n, k)
+             pad_emb = torch.nn.functional.pad(trans_emb_data, (1, 1, 1, 1, 1, 1), "replicate").permute(1, 2, 3, 0).reshape(-1, E)
+             adpt_trans_emb = griddata(trans_grids0, pad_emb.float(), trans_grids1, method='linear')
+             adpt_trans_emb = adpt_trans_emb.reshape(m+2, n+2, k+2, E)[1:-1, 1:-1, 1:-1,]
+
+             # rotation
+             m1, n1, k1 = (self.num_bins["rotation"][k] for k in ["roll_bins", "pitch_bins", "yaw_bins"])
+             M = m1*n1*k1
+             rot_emb_data = emb_data[N : N + M,].reshape(m1, n1, k1, -1).permute(3, 0, 1, 2)  # (e, m, n, k)
+             pad_emb = torch.nn.functional.pad(rot_emb_data, (1, 1, 1, 1, 1, 1), "replicate").permute(1, 2, 3, 0).reshape(-1, E)
+             adpt_rot_emb = griddata(rot_grids0, pad_emb.float(), rot_grids1, method='linear')
+             adpt_rot_emb = adpt_rot_emb.reshape(m1+2, n1+2, k1+2, E)[1:-1, 1:-1, 1:-1,]
+
+             # set data
+             device, dtype = embeddings.weight.data.device, embeddings.weight.data.dtype
+             embeddings.weight.data[:N] = torch.Tensor(adpt_trans_emb.reshape(-1, E), device=device).to(dtype)
+             embeddings.weight.data[N:N+M] = torch.Tensor(adpt_rot_emb.reshape(-1, E), device=device).to(dtype)
+             print("🚀 DONE! Adapting spatial embeddings to the new Gaussian distribution finished.")
+             print(embeddings.weight.data)
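
For reference, a minimal usage sketch of the tokenizer defined above: the bin counts and the base-tokenizer path below are illustrative assumptions, not values taken from this commit (the actual counts are supplied by the model config).

# Usage sketch (assumes action_tokenizer.py is importable and a compatible base tokenizer is available).
import numpy as np
from transformers import AutoTokenizer
from action_tokenizer import SphericalCoordinateActionTokenizer

base_tokenizer = AutoTokenizer.from_pretrained("path/to/base_tokenizer")  # placeholder path, an assumption
num_bins = {  # illustrative bin counts, not from this repository
    "translation": {"theta_bins": 16, "phi_bins": 16, "r_bins": 16},
    "rotation": {"roll_bins": 16, "pitch_bins": 16, "yaw_bins": 16},
    "gripper": 2,
}
action_tokenizer = SphericalCoordinateActionTokenizer(base_tokenizer, num_bins=num_bins)

# Tokenize one 7-DoF action (x, y, z, roll, pitch, yaw, gripper) and decode it back.
action = np.array([0.1, -0.05, 0.2, 0.0, 0.1, -0.2, 1.0])
tokens = action_tokenizer(action)                       # (1, 3) array of '<ACTIONxxxxx>' strings
token_ids = base_tokenizer.convert_tokens_to_ids(list(tokens[0]))
decoded = action_tokenizer.decode_token_ids_to_actions(np.array(token_ids))  # (1, 7) array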
config.json ADDED
@@ -0,0 +1,320 @@
1
+ {
2
+ "_name_or_path": "../pretrained/2025-01-05_09-12-37_oxe_spatial_vla_paligemma3b_zoe_gsN8194_gpu64-204k",
3
+ "_vocab_size": 265347,
4
+ "action_token_begin_idx": 257153,
5
+ "architectures": [
6
+ "SpatialVLAForConditionalGeneration"
7
+ ],
8
+ "auto_map": {
9
+ "AutoConfig": "configuration_spatialvla.SpatialVLAConfig",
10
+ "AutoModel": "modeling_spatialvla.SpatialVLAForConditionalGeneration"
11
+ },
12
+ "bos_token_id": 2,
13
+ "ego3d_patch_reso": 2,
14
+ "eos_token_id": 1,
15
+ "hidden_size": 2048,
16
+ "image_token_index": 257152,
17
+ "model_type": "spatialvla",
18
+ "n_freqs": 8,
19
+ "num_hidden_layers": 26,
20
+ "pad_token_id": 0,
21
+ "projection_dim": 2304,
22
+ "spatial_token_num": 8194,
23
+ "text_config": {
24
+ "_attn_implementation_autoset": true,
25
+ "architectures": [
26
+ "Gemma2ForCausalLM"
27
+ ],
28
+ "eos_token_id": [
29
+ 1,
30
+ 107
31
+ ],
32
+ "hidden_act": "gelu_pytorch_tanh",
33
+ "hidden_size": 2304,
34
+ "intermediate_size": 9216,
35
+ "model_type": "gemma2",
36
+ "num_hidden_layers": 26,
37
+ "num_image_tokens": 256,
38
+ "num_key_value_heads": 4,
39
+ "tie_word_embeddings": false,
40
+ "torch_dtype": "bfloat16",
41
+ "vocab_size": 265347
42
+ },
43
+ "torch_dtype": "bfloat16",
44
+ "transformers_version": "4.47.0",
45
+ "use_spatial_token": true,
46
+ "use_vision_zoe": true,
47
+ "vision_config": {
48
+ "hidden_size": 1152,
49
+ "intermediate_size": 4304,
50
+ "model_type": "siglip_vision_model",
51
+ "num_attention_heads": 16,
52
+ "num_hidden_layers": 27,
53
+ "num_image_tokens": 256,
54
+ "num_positions": 256,
55
+ "patch_size": 14,
56
+ "projection_dim": 2304,
57
+ "torch_dtype": "bfloat16",
58
+ "vision_use_head": false
59
+ },
60
+ "vision_zoe_config": {
61
+ "_attn_implementation_autoset": true,
62
+ "_name_or_path": "Intel/zoedepth-nyu-kitti",
63
+ "add_cross_attention": false,
64
+ "add_projection": false,
65
+ "architectures": [
66
+ "ZoeDepthForDepthEstimation"
67
+ ],
68
+ "attractor_alpha": 1000,
69
+ "attractor_gamma": 2,
70
+ "attractor_kind": "mean",
71
+ "backbone": null,
72
+ "backbone_config": {
73
+ "_attn_implementation_autoset": false,
74
+ "_name_or_path": "",
75
+ "add_cross_attention": false,
76
+ "add_fpn": false,
77
+ "architectures": null,
78
+ "attention_probs_dropout_prob": 0.0,
79
+ "auxiliary_channels": 256,
80
+ "auxiliary_concat_input": false,
81
+ "auxiliary_loss_weight": 0.4,
82
+ "auxiliary_num_convs": 1,
83
+ "bad_words_ids": null,
84
+ "begin_suppress_tokens": null,
85
+ "bos_token_id": null,
86
+ "chunk_size_feed_forward": 0,
87
+ "cross_attention_hidden_size": null,
88
+ "decoder_start_token_id": null,
89
+ "diversity_penalty": 0.0,
90
+ "do_sample": false,
91
+ "drop_path_rate": 0.1,
92
+ "early_stopping": false,
93
+ "encoder_no_repeat_ngram_size": 0,
94
+ "eos_token_id": null,
95
+ "exponential_decay_length_penalty": null,
96
+ "finetuning_task": null,
97
+ "forced_bos_token_id": null,
98
+ "forced_eos_token_id": null,
99
+ "hidden_act": "gelu",
100
+ "hidden_dropout_prob": 0.0,
101
+ "hidden_size": 1024,
102
+ "id2label": {
103
+ "0": "LABEL_0",
104
+ "1": "LABEL_1"
105
+ },
106
+ "image_size": 384,
107
+ "initializer_range": 0.02,
108
+ "intermediate_size": 4096,
109
+ "is_decoder": false,
110
+ "is_encoder_decoder": false,
111
+ "label2id": {
112
+ "LABEL_0": 0,
113
+ "LABEL_1": 1
114
+ },
115
+ "layer_norm_eps": 1e-12,
116
+ "layer_scale_init_value": 0.1,
117
+ "length_penalty": 1.0,
118
+ "max_length": 20,
119
+ "min_length": 0,
120
+ "model_type": "beit",
121
+ "no_repeat_ngram_size": 0,
122
+ "num_attention_heads": 16,
123
+ "num_beam_groups": 1,
124
+ "num_beams": 1,
125
+ "num_channels": 3,
126
+ "num_hidden_layers": 24,
127
+ "num_return_sequences": 1,
128
+ "out_features": [
129
+ "stage6",
130
+ "stage12",
131
+ "stage18",
132
+ "stage24"
133
+ ],
134
+ "out_indices": [
135
+ 6,
136
+ 12,
137
+ 18,
138
+ 24
139
+ ],
140
+ "output_attentions": false,
141
+ "output_hidden_states": false,
142
+ "output_scores": false,
143
+ "pad_token_id": null,
144
+ "patch_size": 16,
145
+ "pool_scales": [
146
+ 1,
147
+ 2,
148
+ 3,
149
+ 6
150
+ ],
151
+ "prefix": null,
152
+ "problem_type": null,
153
+ "pruned_heads": {},
154
+ "remove_invalid_values": false,
155
+ "repetition_penalty": 1.0,
156
+ "reshape_hidden_states": false,
157
+ "return_dict": true,
158
+ "return_dict_in_generate": false,
159
+ "semantic_loss_ignore_index": 255,
160
+ "sep_token_id": null,
161
+ "stage_names": [
162
+ "stem",
163
+ "stage1",
164
+ "stage2",
165
+ "stage3",
166
+ "stage4",
167
+ "stage5",
168
+ "stage6",
169
+ "stage7",
170
+ "stage8",
171
+ "stage9",
172
+ "stage10",
173
+ "stage11",
174
+ "stage12",
175
+ "stage13",
176
+ "stage14",
177
+ "stage15",
178
+ "stage16",
179
+ "stage17",
180
+ "stage18",
181
+ "stage19",
182
+ "stage20",
183
+ "stage21",
184
+ "stage22",
185
+ "stage23",
186
+ "stage24"
187
+ ],
188
+ "suppress_tokens": null,
189
+ "task_specific_params": null,
190
+ "temperature": 1.0,
191
+ "tf_legacy_loss": false,
192
+ "tie_encoder_decoder": false,
193
+ "tie_word_embeddings": true,
194
+ "tokenizer_class": null,
195
+ "top_k": 50,
196
+ "top_p": 1.0,
197
+ "torch_dtype": null,
198
+ "torchscript": false,
199
+ "typical_p": 1.0,
200
+ "use_absolute_position_embeddings": false,
201
+ "use_auxiliary_head": true,
202
+ "use_bfloat16": false,
203
+ "use_mask_token": false,
204
+ "use_mean_pooling": true,
205
+ "use_relative_position_bias": true,
206
+ "use_shared_relative_position_bias": false,
207
+ "vocab_size": 8192
208
+ },
209
+ "backbone_hidden_size": 1024,
210
+ "bad_words_ids": null,
211
+ "batch_norm_eps": 1e-05,
212
+ "begin_suppress_tokens": null,
213
+ "bin_centers_type": "softplus",
214
+ "bin_configurations": [
215
+ {
216
+ "max_depth": 10.0,
217
+ "min_depth": 0.001,
218
+ "n_bins": 64,
219
+ "name": "nyu"
220
+ },
221
+ {
222
+ "max_depth": 80.0,
223
+ "min_depth": 0.001,
224
+ "n_bins": 64,
225
+ "name": "kitti"
226
+ }
227
+ ],
228
+ "bin_embedding_dim": 128,
229
+ "bos_token_id": null,
230
+ "bottleneck_features": 256,
231
+ "chunk_size_feed_forward": 0,
232
+ "cross_attention_hidden_size": null,
233
+ "decoder_start_token_id": null,
234
+ "diversity_penalty": 0.0,
235
+ "do_sample": false,
236
+ "early_stopping": false,
237
+ "encoder_no_repeat_ngram_size": 0,
238
+ "eos_token_id": null,
239
+ "exponential_decay_length_penalty": null,
240
+ "finetuning_task": null,
241
+ "forced_bos_token_id": null,
242
+ "forced_eos_token_id": null,
243
+ "fusion_hidden_size": 256,
244
+ "head_in_index": -1,
245
+ "hidden_act": "gelu",
246
+ "id2label": {
247
+ "0": "LABEL_0",
248
+ "1": "LABEL_1"
249
+ },
250
+ "initializer_range": 0.02,
251
+ "is_decoder": false,
252
+ "is_encoder_decoder": false,
253
+ "label2id": {
254
+ "LABEL_0": 0,
255
+ "LABEL_1": 1
256
+ },
257
+ "length_penalty": 1.0,
258
+ "max_length": 20,
259
+ "max_temp": 50.0,
260
+ "min_length": 0,
261
+ "min_temp": 0.0212,
262
+ "model_type": "zoedepth",
263
+ "neck_hidden_sizes": [
264
+ 256,
265
+ 512,
266
+ 1024,
267
+ 1024
268
+ ],
269
+ "no_repeat_ngram_size": 0,
270
+ "num_attractors": [
271
+ 16,
272
+ 8,
273
+ 4,
274
+ 1
275
+ ],
276
+ "num_beam_groups": 1,
277
+ "num_beams": 1,
278
+ "num_patch_transformer_layers": 4,
279
+ "num_relative_features": 32,
280
+ "num_return_sequences": 1,
281
+ "output_attentions": false,
282
+ "output_hidden_states": false,
283
+ "output_scores": false,
284
+ "pad_token_id": null,
285
+ "patch_transformer_hidden_size": 128,
286
+ "patch_transformer_intermediate_size": 1024,
287
+ "patch_transformer_num_attention_heads": 4,
288
+ "prefix": null,
289
+ "problem_type": null,
290
+ "pruned_heads": {},
291
+ "readout_type": "project",
292
+ "reassemble_factors": [
293
+ 4,
294
+ 2,
295
+ 1,
296
+ 0.5
297
+ ],
298
+ "remove_invalid_values": false,
299
+ "repetition_penalty": 1.0,
300
+ "return_dict": true,
301
+ "return_dict_in_generate": false,
302
+ "sep_token_id": null,
303
+ "suppress_tokens": null,
304
+ "task_specific_params": null,
305
+ "temperature": 1.0,
306
+ "tf_legacy_loss": false,
307
+ "tie_encoder_decoder": false,
308
+ "tie_word_embeddings": true,
309
+ "tokenizer_class": null,
310
+ "top_k": 50,
311
+ "top_p": 1.0,
312
+ "torch_dtype": "bfloat16",
313
+ "torchscript": false,
314
+ "typical_p": 1.0,
315
+ "use_batch_norm_in_fusion_residual": false,
316
+ "use_bfloat16": false,
317
+ "use_bias_in_fusion_residual": null,
318
+ "use_pretrained_backbone": false
319
+ }
320
+ }
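
Because config.json maps AutoConfig and AutoModel to the custom classes via auto_map, loading this checkpoint goes through trust_remote_code. A minimal loading sketch, assuming this folder (or its Hub repo) is available at a placeholder path:

import torch
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("path/or/repo-id", trust_remote_code=True)
print(config.model_type, config.spatial_token_num, config.use_vision_zoe)  # spatialvla 8194 True

# auto_map resolves AutoModel to modeling_spatialvla.SpatialVLAForConditionalGeneration.
model = AutoModel.from_pretrained("path/or/repo-id", torch_dtype=torch.bfloat16, trust_remote_code=True)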
configuration_spatialvla.py ADDED
@@ -0,0 +1,172 @@
+ # MIT License
+ # Copyright (c) 2025 IPEC at Shanghai AI Laboratory
+ # Permission is hereby granted, free of charge, to use, copy, modify, merge, publish,
+ # distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
+ # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
+ # Based on code licensed under the Apache License, Version 2.0 by Google Inc. and HuggingFace Inc. team (Copyright 2024).
+ # coding=utf-8
+
+ """PaliGemma model configuration"""
+
+ import warnings
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+ from transformers import CONFIG_MAPPING, AutoConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class SpatialVLAConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`PaliGemmaForConditionalGeneration`]. It is used to instantiate a
+     PaliGemma model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the PaliGemma-2B.
+
+     e.g. [paligemma-hf/paligemma-2b](https://huggingface.co/paligemma-hf/paligemma-2b)
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vision_config (`PaliGemmaVisionConfig`, *optional*):
+             Custom vision config or dict
+         text_config (`Union[AutoConfig, dict]`, *optional*):
+             The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`.
+         ignore_index (`int`, *optional*, defaults to -100):
+             The ignore index for the loss function.
+         image_token_index (`int`, *optional*, defaults to 256000):
+             The image token index to encode the image prompt.
+         vocab_size (`int`, *optional*, defaults to 257152):
+             Vocabulary size of the PaliGemma model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`~PaliGemmaForConditionalGeneration`]
+         projection_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the multimodal projection space.
+         hidden_size (`int`, *optional*, defaults to 2048):
+             Dimension of the hidden layer of the Language model.
+
+     Example:
+
+     ```python
+     >>> from transformers import PaliGemmaForConditionalGeneration, PaliGemmaConfig, SiglipVisionConfig, GemmaConfig
+
+     >>> # Initializing a Siglip-like vision config
+     >>> vision_config = SiglipVisionConfig()
+
+     >>> # Initializing a PaliGemma config
+     >>> text_config = GemmaConfig()
+
+     >>> # Initializing a PaliGemma paligemma-3b-224 style configuration
+     >>> configuration = PaliGemmaConfig(vision_config, text_config)
+
+     >>> # Initializing a model from the paligemma-3b-224 style configuration
+     >>> model = PaliGemmaForConditionalGeneration(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "spatialvla"
+     sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig, "vision_zoe_config": AutoConfig}
+
+     def __init__(
+         self,
+         vision_config=None,
+         text_config=None,
+         ignore_index=-100,
+         image_token_index=256000,
+         vocab_size=257152,
+         projection_dim=2048,
+         hidden_size=2048,
+         vision_zoe_config=None,
+         action_token_begin_idx=None,
+         spatial_token_num=259,
+         use_spatial_token=False,
+         ego3d_patch_reso=4,
+         n_freqs=8,
+         use_vision_zoe=True,
+         # wrap_lora=False,
+         **kwargs,
+     ):
+         self._ignore_index = ignore_index
+         self.image_token_index = image_token_index
+         self._vocab_size = vocab_size
+         self.projection_dim = projection_dim
+         self.hidden_size = hidden_size
+         self.vision_config = vision_config
+         self.is_encoder_decoder = False
+
+         if isinstance(self.vision_config, dict):
+             vision_config["model_type"] = (
+                 vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
+             )
+             self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
+         elif vision_config is None:
+             self.vision_config = CONFIG_MAPPING["siglip_vision_model"](
+                 intermediate_size=4096,
+                 hidden_size=1152,
+                 patch_size=14,
+                 image_size=224,
+                 num_hidden_layers=27,
+                 num_attention_heads=16,
+                 vocab_size=257152,
+                 vision_use_head=False,
+             )
+
+         self.text_config = text_config
+         if isinstance(self.text_config, dict):
+             text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "gemma2"
+             self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
+         elif text_config is None:
+             self.text_config = CONFIG_MAPPING["gemma2"](
+                 hidden_size=2048,
+                 num_hidden_layers=18,
+                 intermediate_size=16384,
+                 num_attention_heads=8,
+                 num_key_value_heads=1,
+                 is_encoder_decoder=False,
+                 vocab_size=vocab_size,
+             )
+         self.text_config.num_image_tokens = (self.vision_config.image_size // self.vision_config.patch_size) ** 2
+         self.vision_config.projection_dim = projection_dim
+
+         # vision zoe config
+         self.vision_zoe_config = vision_zoe_config
+         if isinstance(self.vision_zoe_config, dict):
+             vision_zoe_config["model_type"] = vision_zoe_config["model_type"] if "model_type" in vision_zoe_config else "zoedepth"
+             self.vision_zoe_config = CONFIG_MAPPING[vision_zoe_config["model_type"]](**vision_zoe_config)
+         else:
+             print(f"🔥 init from default configurations ... {self.vision_zoe_config}")
+             # BUG: initializing zoe with the default config causes a key error
+             # self.vision_zoe_config = CONFIG_MAPPING["zoedepth"]()
+             pass
+
+         # NOTE: additional attributes
+         self.action_token_begin_idx = action_token_begin_idx
+         self.spatial_token_num = spatial_token_num
+         self.use_spatial_token = use_spatial_token
+         self.ego3d_patch_reso = ego3d_patch_reso
+         self.n_freqs = n_freqs
+         self.use_vision_zoe = use_vision_zoe
+         # self.wrap_lora = wrap_lora
+
+         super().__init__(**kwargs)
+
+     @property
+     def ignore_index(self):
+         warnings.warn(
+             "The `ignore_index` attribute is deprecated and will be removed in v4.47.",
+             FutureWarning,
+         )
+         return self._ignore_index
+
+     @ignore_index.setter
+     def ignore_index(self, value):
+         self._ignore_index = value
+
+     def to_dict(self):
+         output = super().to_dict()
+         output.pop("_ignore_index", None)
+         return output
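
A minimal instantiation sketch of the class above, assuming configuration_spatialvla.py is importable; the SpatialVLA-specific values are copied from the config.json added earlier in this commit, and unspecified vision/text sub-configs fall back to the SigLIP/Gemma2 defaults defined in __init__:

from configuration_spatialvla import SpatialVLAConfig

config = SpatialVLAConfig(
    image_token_index=257152,   # from config.json above
    spatial_token_num=8194,     # from config.json above
    use_spatial_token=True,
    ego3d_patch_reso=2,
    n_freqs=8,
    use_vision_zoe=True,        # vision_zoe_config itself is left unset here, per the fallback branch
)
print(config.text_config.model_type, config.vision_config.model_type)  # gemma2 siglip_vision_model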
dataset_statistics.json ADDED
@@ -0,0 +1,3502 @@
1
+ {
2
+ "fractal20220817_data/0.1.0": {
3
+ "action": {
4
+ "mean": [
5
+ 0.006987507455050945,
6
+ 0.006265853065997362,
7
+ -0.012625162489712238,
8
+ 0.04333285242319107,
9
+ -0.005756276659667492,
10
+ 0.0009130403632298112,
11
+ 0.5354204773902893
12
+ ],
13
+ "std": [
14
+ 0.06921109557151794,
15
+ 0.05970889702439308,
16
+ 0.0735311210155487,
17
+ 0.1561058759689331,
18
+ 0.1316441297531128,
19
+ 0.14593777060508728,
20
+ 0.49711623787879944
21
+ ],
22
+ "max": [
23
+ 2.9984593391418457,
24
+ 22.09052848815918,
25
+ 2.7507524490356445,
26
+ 1.570636510848999,
27
+ 1.5321086645126343,
28
+ 1.5691522359848022,
29
+ 1.0
30
+ ],
31
+ "min": [
32
+ -2.0204520225524902,
33
+ -5.497899532318115,
34
+ -2.031663417816162,
35
+ -1.569917917251587,
36
+ -1.569892168045044,
37
+ -1.570419430732727,
38
+ 0.0
39
+ ],
40
+ "q01": [
41
+ -0.22453527510166169,
42
+ -0.14820013284683228,
43
+ -0.231589707583189,
44
+ -0.3517994859814644,
45
+ -0.4193011274933815,
46
+ -0.43643461108207704,
47
+ 0.0
48
+ ],
49
+ "q99": [
50
+ 0.17824687153100965,
51
+ 0.14938379630446405,
52
+ 0.21842354819178575,
53
+ 0.5892666035890578,
54
+ 0.35272657424211445,
55
+ 0.44796681255102094,
56
+ 1.0
57
+ ],
58
+ "mask": [
59
+ true,
60
+ true,
61
+ true,
62
+ true,
63
+ true,
64
+ true,
65
+ false
66
+ ]
67
+ },
68
+ "proprio": {
69
+ "mean": [
70
+ 0.0,
71
+ 0.0,
72
+ 0.0,
73
+ 0.0,
74
+ 0.0,
75
+ 0.0,
76
+ 0.0
77
+ ],
78
+ "std": [
79
+ 0.0,
80
+ 0.0,
81
+ 0.0,
82
+ 0.0,
83
+ 0.0,
84
+ 0.0,
85
+ 0.0
86
+ ],
87
+ "max": [
88
+ 0.0,
89
+ 0.0,
90
+ 0.0,
91
+ 0.0,
92
+ 0.0,
93
+ 0.0,
94
+ 0.0
95
+ ],
96
+ "min": [
97
+ 0.0,
98
+ 0.0,
99
+ 0.0,
100
+ 0.0,
101
+ 0.0,
102
+ 0.0,
103
+ 0.0
104
+ ],
105
+ "q01": [
106
+ 0.0,
107
+ 0.0,
108
+ 0.0,
109
+ 0.0,
110
+ 0.0,
111
+ 0.0,
112
+ 0.0
113
+ ],
114
+ "q99": [
115
+ 0.0,
116
+ 0.0,
117
+ 0.0,
118
+ 0.0,
119
+ 0.0,
120
+ 0.0,
121
+ 0.0
122
+ ]
123
+ },
124
+ "num_transitions": 3786400,
125
+ "num_trajectories": 87212
126
+ },
127
+ "kuka/0.1.0": {
128
+ "action": {
129
+ "mean": [
130
+ -0.00046687963185831904,
131
+ 0.00040137648466043174,
132
+ -0.0012807906605303288,
133
+ 0.0,
134
+ 0.0,
135
+ -0.037225183099508286,
136
+ 0.4131543040275574
137
+ ],
138
+ "std": [
139
+ 0.020832739770412445,
140
+ 0.029158642515540123,
141
+ 0.0642285868525505,
142
+ 0.0,
143
+ 0.0,
144
+ 0.14224639534950256,
145
+ 0.4908643662929535
146
+ ],
147
+ "max": [
148
+ 0.1697135865688324,
149
+ 0.2777623236179352,
150
+ 0.43710532784461975,
151
+ 0.0,
152
+ 0.0,
153
+ 1.9684287309646606,
154
+ 1.0
155
+ ],
156
+ "min": [
157
+ -0.159867063164711,
158
+ -0.2892282009124756,
159
+ -0.2795473635196686,
160
+ 0.0,
161
+ 0.0,
162
+ -1.9875637292861938,
163
+ 0.0
164
+ ],
165
+ "q01": [
166
+ -0.06619441494345665,
167
+ -0.08713878810405731,
168
+ -0.15083016991615295,
169
+ 0.0,
170
+ 0.0,
171
+ -0.5415697038173676,
172
+ 0.0
173
+ ],
174
+ "q99": [
175
+ 0.06601839080452929,
176
+ 0.08732476785779003,
177
+ 0.18168179214000715,
178
+ 0.0,
179
+ 0.0,
180
+ 0.2923380345106127,
181
+ 1.0
182
+ ],
183
+ "mask": [
184
+ true,
185
+ true,
186
+ true,
187
+ true,
188
+ true,
189
+ true,
190
+ false
191
+ ]
192
+ },
193
+ "proprio": {
194
+ "mean": [
195
+ 0.0,
196
+ 0.0,
197
+ 0.0,
198
+ 0.0,
199
+ 0.0,
200
+ 0.0,
201
+ 0.0
202
+ ],
203
+ "std": [
204
+ 0.0,
205
+ 0.0,
206
+ 0.0,
207
+ 0.0,
208
+ 0.0,
209
+ 0.0,
210
+ 0.0
211
+ ],
212
+ "max": [
213
+ 0.0,
214
+ 0.0,
215
+ 0.0,
216
+ 0.0,
217
+ 0.0,
218
+ 0.0,
219
+ 0.0
220
+ ],
221
+ "min": [
222
+ 0.0,
223
+ 0.0,
224
+ 0.0,
225
+ 0.0,
226
+ 0.0,
227
+ 0.0,
228
+ 0.0
229
+ ],
230
+ "q01": [
231
+ 0.0,
232
+ 0.0,
233
+ 0.0,
234
+ 0.0,
235
+ 0.0,
236
+ 0.0,
237
+ 0.0
238
+ ],
239
+ "q99": [
240
+ 0.0,
241
+ 0.0,
242
+ 0.0,
243
+ 0.0,
244
+ 0.0,
245
+ 0.0,
246
+ 0.0
247
+ ]
248
+ },
249
+ "num_transitions": 2455879,
250
+ "num_trajectories": 209880
251
+ },
252
+ "bridge_orig/1.0.0": {
253
+ "action": {
254
+ "mean": [
255
+ 0.00023341714404523373,
256
+ 0.00013004327774979174,
257
+ -0.00012762591359205544,
258
+ -0.0001556579809403047,
259
+ -0.00040393328526988626,
260
+ 0.00023558337124995887,
261
+ 0.5764582753181458
262
+ ],
263
+ "std": [
264
+ 0.009765734896063805,
265
+ 0.013689505867660046,
266
+ 0.012667152099311352,
267
+ 0.028534479439258575,
268
+ 0.03063790127635002,
269
+ 0.07691770792007446,
270
+ 0.4973658621311188
271
+ ],
272
+ "max": [
273
+ 0.41691166162490845,
274
+ 0.25864794850349426,
275
+ 0.21218234300613403,
276
+ 3.122201919555664,
277
+ 1.8618112802505493,
278
+ 6.280478477478027,
279
+ 1.0
280
+ ],
281
+ "min": [
282
+ -0.4007510244846344,
283
+ -0.13874775171279907,
284
+ -0.22553899884223938,
285
+ -3.2010786533355713,
286
+ -1.8618112802505493,
287
+ -6.279075622558594,
288
+ 0.0
289
+ ],
290
+ "q01": [
291
+ -0.02872725307941437,
292
+ -0.04170349963009357,
293
+ -0.026093858778476715,
294
+ -0.08092105075716972,
295
+ -0.09288699507713317,
296
+ -0.20718276381492615,
297
+ 0.0
298
+ ],
299
+ "q99": [
300
+ 0.028309678435325586,
301
+ 0.040855254605412394,
302
+ 0.040161586627364146,
303
+ 0.08192047759890528,
304
+ 0.07792850524187081,
305
+ 0.20382574498653397,
306
+ 1.0
307
+ ],
308
+ "mask": [
309
+ true,
310
+ true,
311
+ true,
312
+ true,
313
+ true,
314
+ true,
315
+ false
316
+ ]
317
+ },
318
+ "proprio": {
319
+ "mean": [
320
+ 0.0,
321
+ 0.0,
322
+ 0.0,
323
+ 0.0,
324
+ 0.0,
325
+ 0.0,
326
+ 0.0
327
+ ],
328
+ "std": [
329
+ 0.0,
330
+ 0.0,
331
+ 0.0,
332
+ 0.0,
333
+ 0.0,
334
+ 0.0,
335
+ 0.0
336
+ ],
337
+ "max": [
338
+ 0.0,
339
+ 0.0,
340
+ 0.0,
341
+ 0.0,
342
+ 0.0,
343
+ 0.0,
344
+ 0.0
345
+ ],
346
+ "min": [
347
+ 0.0,
348
+ 0.0,
349
+ 0.0,
350
+ 0.0,
351
+ 0.0,
352
+ 0.0,
353
+ 0.0
354
+ ],
355
+ "q01": [
356
+ 0.0,
357
+ 0.0,
358
+ 0.0,
359
+ 0.0,
360
+ 0.0,
361
+ 0.0,
362
+ 0.0
363
+ ],
364
+ "q99": [
365
+ 0.0,
366
+ 0.0,
367
+ 0.0,
368
+ 0.0,
369
+ 0.0,
370
+ 0.0,
371
+ 0.0
372
+ ]
373
+ },
374
+ "num_transitions": 2135463,
375
+ "num_trajectories": 60064
376
+ },
377
+ "taco_play/0.1.0": {
378
+ "action": {
379
+ "mean": [
380
+ -0.0038459226489067078,
381
+ 0.009671436622738838,
382
+ 0.01278059184551239,
383
+ -0.0054037850350141525,
384
+ -0.009606562554836273,
385
+ -0.0024807206355035305,
386
+ 0.4263913035392761
387
+ ],
388
+ "std": [
389
+ 0.23254045844078064,
390
+ 0.3629826307296753,
391
+ 0.2869291603565216,
392
+ 0.261770635843277,
393
+ 0.24388927221298218,
394
+ 0.5216501355171204,
395
+ 0.49469029903411865
396
+ ],
397
+ "max": [
398
+ 1.4915844202041626,
399
+ 2.1842432022094727,
400
+ 2.6836395263671875,
401
+ 5.035226821899414,
402
+ 2.665864944458008,
403
+ 4.250768661499023,
404
+ 1.0
405
+ ],
406
+ "min": [
407
+ -4.242457866668701,
408
+ -3.192805051803589,
409
+ -1.3371467590332031,
410
+ -4.202683448791504,
411
+ -2.6722638607025146,
412
+ -3.3467135429382324,
413
+ 0.0
414
+ ],
415
+ "q01": [
416
+ -0.7106140398979186,
417
+ -1.056944659948349,
418
+ -0.5878450274467468,
419
+ -0.7682853937149048,
420
+ -0.7180147767066956,
421
+ -1.5527938604354858,
422
+ 0.0
423
+ ],
424
+ "q99": [
425
+ 0.6482916426658629,
426
+ 1.0051310062408447,
427
+ 0.9480248689651489,
428
+ 0.6926478147506714,
429
+ 0.6351067513227462,
430
+ 1.628010264635086,
431
+ 1.0
432
+ ],
433
+ "mask": [
434
+ true,
435
+ true,
436
+ true,
437
+ true,
438
+ true,
439
+ true,
440
+ false
441
+ ]
442
+ },
443
+ "proprio": {
444
+ "mean": [
445
+ 0.0,
446
+ 0.0,
447
+ 0.0,
448
+ 0.0,
449
+ 0.0,
450
+ 0.0,
451
+ 0.0
452
+ ],
453
+ "std": [
454
+ 0.0,
455
+ 0.0,
456
+ 0.0,
457
+ 0.0,
458
+ 0.0,
459
+ 0.0,
460
+ 0.0
461
+ ],
462
+ "max": [
463
+ 0.0,
464
+ 0.0,
465
+ 0.0,
466
+ 0.0,
467
+ 0.0,
468
+ 0.0,
469
+ 0.0
470
+ ],
471
+ "min": [
472
+ 0.0,
473
+ 0.0,
474
+ 0.0,
475
+ 0.0,
476
+ 0.0,
477
+ 0.0,
478
+ 0.0
479
+ ],
480
+ "q01": [
481
+ 0.0,
482
+ 0.0,
483
+ 0.0,
484
+ 0.0,
485
+ 0.0,
486
+ 0.0,
487
+ 0.0
488
+ ],
489
+ "q99": [
490
+ 0.0,
491
+ 0.0,
492
+ 0.0,
493
+ 0.0,
494
+ 0.0,
495
+ 0.0,
496
+ 0.0
497
+ ]
498
+ },
499
+ "num_transitions": 237798,
500
+ "num_trajectories": 3603
501
+ },
502
+ "jaco_play/0.1.0": {
503
+ "action": {
504
+ "mean": [
505
+ 0.0009658387862145901,
506
+ -0.005800850689411163,
507
+ -0.003950685728341341,
508
+ 0.0,
509
+ 0.0,
510
+ 0.0,
511
+ 0.34934908151626587
512
+ ],
513
+ "std": [
514
+ 0.12234985828399658,
515
+ 0.09678783267736435,
516
+ 0.1115543395280838,
517
+ 0.0,
518
+ 0.0,
519
+ 0.0,
520
+ 0.47682321071624756
521
+ ],
522
+ "max": [
523
+ 0.20000000298023224,
524
+ 0.20000000298023224,
525
+ 0.20000000298023224,
526
+ 0.0,
527
+ 0.0,
528
+ 0.0,
529
+ 1.0
530
+ ],
531
+ "min": [
532
+ -0.20000000298023224,
533
+ -0.20000000298023224,
534
+ -0.20000000298023224,
535
+ 0.0,
536
+ 0.0,
537
+ 0.0,
538
+ 0.0
539
+ ],
540
+ "q01": [
541
+ -0.20000000298023224,
542
+ -0.20000000298023224,
543
+ -0.20000000298023224,
544
+ 0.0,
545
+ 0.0,
546
+ 0.0,
547
+ 0.0
548
+ ],
549
+ "q99": [
550
+ 0.20000000298023224,
551
+ 0.20000000298023224,
552
+ 0.20000000298023224,
553
+ 0.0,
554
+ 0.0,
555
+ 0.0,
556
+ 1.0
557
+ ],
558
+ "mask": [
559
+ true,
560
+ true,
561
+ true,
562
+ true,
563
+ true,
564
+ true,
565
+ false
566
+ ]
567
+ },
568
+ "proprio": {
569
+ "mean": [
570
+ 0.0,
571
+ 0.0,
572
+ 0.0,
573
+ 0.0,
574
+ 0.0,
575
+ 0.0,
576
+ 0.0
577
+ ],
578
+ "std": [
579
+ 0.0,
580
+ 0.0,
581
+ 0.0,
582
+ 0.0,
583
+ 0.0,
584
+ 0.0,
585
+ 0.0
586
+ ],
587
+ "max": [
588
+ 0.0,
589
+ 0.0,
590
+ 0.0,
591
+ 0.0,
592
+ 0.0,
593
+ 0.0,
594
+ 0.0
595
+ ],
596
+ "min": [
597
+ 0.0,
598
+ 0.0,
599
+ 0.0,
600
+ 0.0,
601
+ 0.0,
602
+ 0.0,
603
+ 0.0
604
+ ],
605
+ "q01": [
606
+ 0.0,
607
+ 0.0,
608
+ 0.0,
609
+ 0.0,
610
+ 0.0,
611
+ 0.0,
612
+ 0.0
613
+ ],
614
+ "q99": [
615
+ 0.0,
616
+ 0.0,
617
+ 0.0,
618
+ 0.0,
619
+ 0.0,
620
+ 0.0,
621
+ 0.0
622
+ ]
623
+ },
624
+ "num_transitions": 77965,
625
+ "num_trajectories": 1085
626
+ },
627
+ "berkeley_cable_routing/0.1.0": {
628
+ "action": {
629
+ "mean": [
630
+ -0.07139858603477478,
631
+ 0.023608991876244545,
632
+ 0.10241956263780594,
633
+ 0.0,
634
+ 0.0,
635
+ 0.04967105761170387,
636
+ 0.0
637
+ ],
638
+ "std": [
639
+ 0.18155010044574738,
640
+ 0.18109896779060364,
641
+ 0.21220752596855164,
642
+ 0.0,
643
+ 0.0,
644
+ 0.3475516438484192,
645
+ 0.0
646
+ ],
647
+ "max": [
648
+ 0.9633283019065857,
649
+ 1.0,
650
+ 1.0,
651
+ 0.0,
652
+ 0.0,
653
+ 1.0,
654
+ 0.0
655
+ ],
656
+ "min": [
657
+ -0.9809081554412842,
658
+ -0.9554349184036255,
659
+ -0.9994775056838989,
660
+ 0.0,
661
+ 0.0,
662
+ -1.0,
663
+ 0.0
664
+ ],
665
+ "q01": [
666
+ -0.5534318816661835,
667
+ -0.4797285574674606,
668
+ -0.5314934802055359,
669
+ 0.0,
670
+ 0.0,
671
+ -0.8855219376087189,
672
+ 0.0
673
+ ],
674
+ "q99": [
675
+ 0.42652835428714786,
676
+ 0.5000944086909298,
677
+ 0.639823433756829,
678
+ 0.0,
679
+ 0.0,
680
+ 0.984243879914284,
681
+ 0.0
682
+ ],
683
+ "mask": [
684
+ true,
685
+ true,
686
+ true,
687
+ true,
688
+ true,
689
+ true,
690
+ false
691
+ ]
692
+ },
693
+ "proprio": {
694
+ "mean": [
695
+ 0.0,
696
+ 0.0,
697
+ 0.0,
698
+ 0.0,
699
+ 0.0,
700
+ 0.0,
701
+ 0.0
702
+ ],
703
+ "std": [
704
+ 0.0,
705
+ 0.0,
706
+ 0.0,
707
+ 0.0,
708
+ 0.0,
709
+ 0.0,
710
+ 0.0
711
+ ],
712
+ "max": [
713
+ 0.0,
714
+ 0.0,
715
+ 0.0,
716
+ 0.0,
717
+ 0.0,
718
+ 0.0,
719
+ 0.0
720
+ ],
721
+ "min": [
722
+ 0.0,
723
+ 0.0,
724
+ 0.0,
725
+ 0.0,
726
+ 0.0,
727
+ 0.0,
728
+ 0.0
729
+ ],
730
+ "q01": [
731
+ 0.0,
732
+ 0.0,
733
+ 0.0,
734
+ 0.0,
735
+ 0.0,
736
+ 0.0,
737
+ 0.0
738
+ ],
739
+ "q99": [
740
+ 0.0,
741
+ 0.0,
742
+ 0.0,
743
+ 0.0,
744
+ 0.0,
745
+ 0.0,
746
+ 0.0
747
+ ]
748
+ },
749
+ "num_transitions": 42328,
750
+ "num_trajectories": 1647
751
+ },
752
+ "roboturk/0.1.0": {
753
+ "action": {
754
+ "mean": [
755
+ 0.001444889116100967,
756
+ -0.0015945355407893658,
757
+ -0.0011753803119063377,
758
+ 0.002301239175722003,
759
+ -0.0009382442804053426,
760
+ -0.00011485860886750743,
761
+ 0.5746025443077087
762
+ ],
763
+ "std": [
764
+ 0.0493537075817585,
765
+ 0.06354564428329468,
766
+ 0.06116492301225662,
767
+ 0.0955340564250946,
768
+ 0.08420011401176453,
769
+ 0.06517910957336426,
770
+ 0.4945177137851715
771
+ ],
772
+ "max": [
773
+ 0.39124172925949097,
774
+ 0.4601028263568878,
775
+ 0.4870833456516266,
776
+ 1.816888689994812,
777
+ 1.8240282535552979,
778
+ 1.4824820756912231,
779
+ 1.0
780
+ ],
781
+ "min": [
782
+ -0.6546999216079712,
783
+ -0.6365841031074524,
784
+ -0.4217723608016968,
785
+ -1.6695482730865479,
786
+ -1.8023357391357422,
787
+ -1.4630827903747559,
788
+ 0.0
789
+ ],
790
+ "q01": [
791
+ -0.1342635464668274,
792
+ -0.19996687173843383,
793
+ -0.1482972100377083,
794
+ -0.20720748245716095,
795
+ -0.09676413893699647,
796
+ -0.18075634717941286,
797
+ 0.0
798
+ ],
799
+ "q99": [
800
+ 0.14956976801157001,
801
+ 0.1805950567126275,
802
+ 0.18841815620660796,
803
+ 0.21615413755178453,
804
+ 0.09457383215427405,
805
+ 0.18543301910162005,
806
+ 1.0
807
+ ],
808
+ "mask": [
809
+ true,
810
+ true,
811
+ true,
812
+ true,
813
+ true,
814
+ true,
815
+ false
816
+ ]
817
+ },
818
+ "proprio": {
819
+ "mean": [
820
+ 0.0,
821
+ 0.0,
822
+ 0.0,
823
+ 0.0,
824
+ 0.0,
825
+ 0.0,
826
+ 0.0
827
+ ],
828
+ "std": [
829
+ 0.0,
830
+ 0.0,
831
+ 0.0,
832
+ 0.0,
833
+ 0.0,
834
+ 0.0,
835
+ 0.0
836
+ ],
837
+ "max": [
838
+ 0.0,
839
+ 0.0,
840
+ 0.0,
841
+ 0.0,
842
+ 0.0,
843
+ 0.0,
844
+ 0.0
845
+ ],
846
+ "min": [
847
+ 0.0,
848
+ 0.0,
849
+ 0.0,
850
+ 0.0,
851
+ 0.0,
852
+ 0.0,
853
+ 0.0
854
+ ],
855
+ "q01": [
856
+ 0.0,
857
+ 0.0,
858
+ 0.0,
859
+ 0.0,
860
+ 0.0,
861
+ 0.0,
862
+ 0.0
863
+ ],
864
+ "q99": [
865
+ 0.0,
866
+ 0.0,
867
+ 0.0,
868
+ 0.0,
869
+ 0.0,
870
+ 0.0,
871
+ 0.0
872
+ ]
873
+ },
874
+ "num_transitions": 187507,
875
+ "num_trajectories": 1995
876
+ },
877
+ "viola/0.1.0": {
878
+ "action": {
879
+ "mean": [
880
+ 0.04761853069067001,
881
+ -0.029204534366726875,
882
+ 0.055867329239845276,
883
+ -0.0026185200549662113,
884
+ 0.006867341697216034,
885
+ -0.016821356490254402,
886
+ 0.7323777675628662
887
+ ],
888
+ "std": [
889
+ 0.39157867431640625,
890
+ 0.40765219926834106,
891
+ 0.40077903866767883,
892
+ 0.10023998469114304,
893
+ 0.08443189412355423,
894
+ 0.10375089943408966,
895
+ 0.442600816488266
896
+ ],
897
+ "max": [
898
+ 1.0,
899
+ 1.0,
900
+ 1.0,
901
+ 0.375,
902
+ 0.36321428418159485,
903
+ 0.375,
904
+ 1.0
905
+ ],
906
+ "min": [
907
+ -1.0,
908
+ -1.0,
909
+ -1.0,
910
+ -0.375,
911
+ -0.375,
912
+ -0.375,
913
+ 0.0
914
+ ],
915
+ "q01": [
916
+ -0.9628571271896362,
917
+ -1.0,
918
+ -1.0,
919
+ -0.26249998807907104,
920
+ -0.21321429312229156,
921
+ -0.3385714292526245,
922
+ 0.0
923
+ ],
924
+ "q99": [
925
+ 0.9114285707473755,
926
+ 0.868571400642395,
927
+ 1.0,
928
+ 0.2817857265472412,
929
+ 0.2239285707473755,
930
+ 0.3557142913341522,
931
+ 1.0
932
+ ],
933
+ "mask": [
934
+ true,
935
+ true,
936
+ true,
937
+ true,
938
+ true,
939
+ true,
940
+ false
941
+ ]
942
+ },
943
+ "proprio": {
944
+ "mean": [
945
+ 0.0,
946
+ 0.0,
947
+ 0.0,
948
+ 0.0,
949
+ 0.0,
950
+ 0.0,
951
+ 0.0
952
+ ],
953
+ "std": [
954
+ 0.0,
955
+ 0.0,
956
+ 0.0,
957
+ 0.0,
958
+ 0.0,
959
+ 0.0,
960
+ 0.0
961
+ ],
962
+ "max": [
963
+ 0.0,
964
+ 0.0,
965
+ 0.0,
966
+ 0.0,
967
+ 0.0,
968
+ 0.0,
969
+ 0.0
970
+ ],
971
+ "min": [
972
+ 0.0,
973
+ 0.0,
974
+ 0.0,
975
+ 0.0,
976
+ 0.0,
977
+ 0.0,
978
+ 0.0
979
+ ],
980
+ "q01": [
981
+ 0.0,
982
+ 0.0,
983
+ 0.0,
984
+ 0.0,
985
+ 0.0,
986
+ 0.0,
987
+ 0.0
988
+ ],
989
+ "q99": [
990
+ 0.0,
991
+ 0.0,
992
+ 0.0,
993
+ 0.0,
994
+ 0.0,
995
+ 0.0,
996
+ 0.0
997
+ ]
998
+ },
999
+ "num_transitions": 76324,
1000
+ "num_trajectories": 150
1001
+ },
1002
+ "berkeley_autolab_ur5/0.1.0": {
1003
+ "action": {
1004
+ "mean": [
1005
+ 0.0005683613708242774,
1006
+ 0.0012176961172372103,
1007
+ -0.0005296385497786105,
1008
+ 0.00021029777417425066,
1009
+ 6.069485243642703e-05,
1010
+ 0.0012049867073073983,
1011
+ 0.6298308372497559
1012
+ ],
1013
+ "std": [
1014
+ 0.011533073149621487,
1015
+ 0.007990497164428234,
1016
+ 0.009577799588441849,
1017
+ 0.009432999417185783,
1018
+ 0.016427574679255486,
1019
+ 0.011054049246013165,
1020
+ 0.482679545879364
1021
+ ],
1022
+ "max": [
1023
+ 0.019999999552965164,
1024
+ 0.019999999552965164,
1025
+ 0.019999999552965164,
1026
+ 0.06666667014360428,
1027
+ 0.06666667014360428,
1028
+ 0.06666667014360428,
1029
+ 1.0
1030
+ ],
1031
+ "min": [
1032
+ -0.019999999552965164,
1033
+ -0.019999999552965164,
1034
+ -0.019999999552965164,
1035
+ -0.06666667014360428,
1036
+ -0.06666667014360428,
1037
+ -0.06666667014360428,
1038
+ 0.0
1039
+ ],
1040
+ "q01": [
1041
+ -0.019999999552965164,
1042
+ -0.019999999552965164,
1043
+ -0.019999999552965164,
1044
+ -0.02628571353852749,
1045
+ -0.06666667014360428,
1046
+ -0.03847619146108627,
1047
+ 0.0
1048
+ ],
1049
+ "q99": [
1050
+ 0.019999999552965164,
1051
+ 0.019999999552965164,
1052
+ 0.019999999552965164,
1053
+ 0.031809523701667786,
1054
+ 0.06666667014360428,
1055
+ 0.036571428179740906,
1056
+ 1.0
1057
+ ],
1058
+ "mask": [
1059
+ true,
1060
+ true,
1061
+ true,
1062
+ true,
1063
+ true,
1064
+ true,
1065
+ false
1066
+ ]
1067
+ },
1068
+ "proprio": {
1069
+ "mean": [
1070
+ 0.0,
1071
+ 0.0,
1072
+ 0.0,
1073
+ 0.0,
1074
+ 0.0,
1075
+ 0.0,
1076
+ 0.0
1077
+ ],
1078
+ "std": [
1079
+ 0.0,
1080
+ 0.0,
1081
+ 0.0,
1082
+ 0.0,
1083
+ 0.0,
1084
+ 0.0,
1085
+ 0.0
1086
+ ],
1087
+ "max": [
1088
+ 0.0,
1089
+ 0.0,
1090
+ 0.0,
1091
+ 0.0,
1092
+ 0.0,
1093
+ 0.0,
1094
+ 0.0
1095
+ ],
1096
+ "min": [
1097
+ 0.0,
1098
+ 0.0,
1099
+ 0.0,
1100
+ 0.0,
1101
+ 0.0,
1102
+ 0.0,
1103
+ 0.0
1104
+ ],
1105
+ "q01": [
1106
+ 0.0,
1107
+ 0.0,
1108
+ 0.0,
1109
+ 0.0,
1110
+ 0.0,
1111
+ 0.0,
1112
+ 0.0
1113
+ ],
1114
+ "q99": [
1115
+ 0.0,
1116
+ 0.0,
1117
+ 0.0,
1118
+ 0.0,
1119
+ 0.0,
1120
+ 0.0,
1121
+ 0.0
1122
+ ]
1123
+ },
1124
+ "num_transitions": 97939,
1125
+ "num_trajectories": 1000
1126
+ },
1127
+ "toto/0.1.0": {
1128
+ "action": {
1129
+ "mean": [
1130
+ 0.3854214549064636,
1131
+ 0.007769507821649313,
1132
+ 0.3632742166519165,
1133
+ -0.665202796459198,
1134
+ 0.1890396624803543,
1135
+ 0.0329875648021698,
1136
+ 0.0
1137
+ ],
1138
+ "std": [
1139
+ 0.12211630493402481,
1140
+ 0.19378569722175598,
1141
+ 0.10178232192993164,
1142
+ 0.5725256204605103,
1143
+ 0.298846036195755,
1144
+ 0.32599160075187683,
1145
+ 0.0
1146
+ ],
1147
+ "max": [
1148
+ 0.6839867234230042,
1149
+ 0.4454185664653778,
1150
+ 0.7984078526496887,
1151
+ 2.120781660079956,
1152
+ 1.371164321899414,
1153
+ 1.4118704795837402,
1154
+ 0.0
1155
+ ],
1156
+ "min": [
1157
+ 0.09922284632921219,
1158
+ -0.5180193781852722,
1159
+ 0.13791072368621826,
1160
+ -2.635117530822754,
1161
+ -1.0734480619430542,
1162
+ -1.9282547235488892,
1163
+ 0.0
1164
+ ],
1165
+ "q01": [
1166
+ 0.1756722891330719,
1167
+ -0.3077590811252594,
1168
+ 0.235383919775486,
1169
+ -2.0908505964279174,
1170
+ -0.6191593289375306,
1171
+ -0.7488683319091797,
1172
+ 0.0
1173
+ ],
1174
+ "q99": [
1175
+ 0.6136963081359863,
1176
+ 0.33704194784164443,
1177
+ 0.6681221985816956,
1178
+ 0.7422861719131538,
1179
+ 0.7955395007133507,
1180
+ 0.740464625358582,
1181
+ 0.0
1182
+ ],
1183
+ "mask": [
1184
+ true,
1185
+ true,
1186
+ true,
1187
+ true,
1188
+ true,
1189
+ true,
1190
+ false
1191
+ ]
1192
+ },
1193
+ "proprio": {
1194
+ "mean": [
1195
+ 0.0,
1196
+ 0.0,
1197
+ 0.0,
1198
+ 0.0,
1199
+ 0.0,
1200
+ 0.0,
1201
+ 0.0
1202
+ ],
1203
+ "std": [
1204
+ 0.0,
1205
+ 0.0,
1206
+ 0.0,
1207
+ 0.0,
1208
+ 0.0,
1209
+ 0.0,
1210
+ 0.0
1211
+ ],
1212
+ "max": [
1213
+ 0.0,
1214
+ 0.0,
1215
+ 0.0,
1216
+ 0.0,
1217
+ 0.0,
1218
+ 0.0,
1219
+ 0.0
1220
+ ],
1221
+ "min": [
1222
+ 0.0,
1223
+ 0.0,
1224
+ 0.0,
1225
+ 0.0,
1226
+ 0.0,
1227
+ 0.0,
1228
+ 0.0
1229
+ ],
1230
+ "q01": [
1231
+ 0.0,
1232
+ 0.0,
1233
+ 0.0,
1234
+ 0.0,
1235
+ 0.0,
1236
+ 0.0,
1237
+ 0.0
1238
+ ],
1239
+ "q99": [
1240
+ 0.0,
1241
+ 0.0,
1242
+ 0.0,
1243
+ 0.0,
1244
+ 0.0,
1245
+ 0.0,
1246
+ 0.0
1247
+ ]
1248
+ },
1249
+ "num_transitions": 325699,
1250
+ "num_trajectories": 1003
1251
+ },
1252
+ "language_table/0.1.0": {
1253
+ "action": {
1254
+ "mean": [
1255
+ 0.00014891766477376223,
1256
+ -0.0005636657006107271,
1257
+ 0.0,
1258
+ 0.0,
1259
+ 0.0,
1260
+ 0.0,
1261
+ 1.0
1262
+ ],
1263
+ "std": [
1264
+ 0.030162859708070755,
1265
+ 0.04230763390660286,
1266
+ 0.0,
1267
+ 0.0,
1268
+ 0.0,
1269
+ 0.0,
1270
+ 0.0
1271
+ ],
1272
+ "max": [
1273
+ 0.23357294499874115,
1274
+ 0.24496802687644958,
1275
+ 0.0,
1276
+ 0.0,
1277
+ 0.0,
1278
+ 0.0,
1279
+ 1.0
1280
+ ],
1281
+ "min": [
1282
+ -0.21989956498146057,
1283
+ -0.23736150562763214,
1284
+ 0.0,
1285
+ 0.0,
1286
+ 0.0,
1287
+ 0.0,
1288
+ 1.0
1289
+ ],
1290
+ "q01": [
1291
+ -0.08179590478539467,
1292
+ -0.11795833334326744,
1293
+ 0.0,
1294
+ 0.0,
1295
+ 0.0,
1296
+ 0.0,
1297
+ 1.0
1298
+ ],
1299
+ "q99": [
1300
+ 0.08822273463010788,
1301
+ 0.1191693339496851,
1302
+ 0.0,
1303
+ 0.0,
1304
+ 0.0,
1305
+ 0.0,
1306
+ 1.0
1307
+ ],
1308
+ "mask": [
1309
+ true,
1310
+ true,
1311
+ true,
1312
+ true,
1313
+ true,
1314
+ true,
1315
+ false
1316
+ ]
1317
+ },
1318
+ "proprio": {
1319
+ "mean": [
1320
+ 0.0,
1321
+ 0.0,
1322
+ 0.0,
1323
+ 0.0,
1324
+ 0.0,
1325
+ 0.0,
1326
+ 0.0
1327
+ ],
1328
+ "std": [
1329
+ 0.0,
1330
+ 0.0,
1331
+ 0.0,
1332
+ 0.0,
1333
+ 0.0,
1334
+ 0.0,
1335
+ 0.0
1336
+ ],
1337
+ "max": [
1338
+ 0.0,
1339
+ 0.0,
1340
+ 0.0,
1341
+ 0.0,
1342
+ 0.0,
1343
+ 0.0,
1344
+ 0.0
1345
+ ],
1346
+ "min": [
1347
+ 0.0,
1348
+ 0.0,
1349
+ 0.0,
1350
+ 0.0,
1351
+ 0.0,
1352
+ 0.0,
1353
+ 0.0
1354
+ ],
1355
+ "q01": [
1356
+ 0.0,
1357
+ 0.0,
1358
+ 0.0,
1359
+ 0.0,
1360
+ 0.0,
1361
+ 0.0,
1362
+ 0.0
1363
+ ],
1364
+ "q99": [
1365
+ 0.0,
1366
+ 0.0,
1367
+ 0.0,
1368
+ 0.0,
1369
+ 0.0,
1370
+ 0.0,
1371
+ 0.0
1372
+ ]
1373
+ },
1374
+ "num_transitions": 7045476,
1375
+ "num_trajectories": 442226
1376
+ },
1377
+ "stanford_hydra_dataset_converted_externally_to_rlds/0.1.0": {
1378
+ "action": {
1379
+ "mean": [
1380
+ 0.0007790043600834906,
1381
+ 0.00013707877951674163,
1382
+ -0.000254859565757215,
1383
+ 0.0012903243768960238,
1384
+ -0.004751724191009998,
1385
+ 0.002692892448976636,
1386
+ 0.48855218291282654
1387
+ ],
1388
+ "std": [
1389
+ 0.008022183552384377,
1390
+ 0.009131456725299358,
1391
+ 0.00957438349723816,
1392
+ 0.04122224077582359,
1393
+ 0.03843001648783684,
1394
+ 0.046067025512456894,
1395
+ 0.49978113174438477
1396
+ ],
1397
+ "max": [
1398
+ 0.02499854564666748,
1399
+ 0.02499903365969658,
1400
+ 0.024999922141432762,
1401
+ 0.24974457919597626,
1402
+ 0.24997030198574066,
1403
+ 0.24999946355819702,
1404
+ 1.0
1405
+ ],
1406
+ "min": [
1407
+ -0.024999044835567474,
1408
+ -0.024999700486660004,
1409
+ -0.02499929815530777,
1410
+ -0.24993225932121277,
1411
+ -0.2499666064977646,
1412
+ -0.2499932497739792,
1413
+ 0.0
1414
+ ],
1415
+ "q01": [
1416
+ -0.019992006458342076,
1417
+ -0.02415412735193968,
1418
+ -0.022941758055239916,
1419
+ -0.11085530579090118,
1420
+ -0.12024572037160397,
1421
+ -0.13314770206809043,
1422
+ 0.0
1423
+ ],
1424
+ "q99": [
1425
+ 0.022886231057345868,
1426
+ 0.022358838934451335,
1427
+ 0.02410089675337076,
1428
+ 0.12370114490389822,
1429
+ 0.11323311634361738,
1430
+ 0.18474749639630164,
1431
+ 1.0
1432
+ ],
1433
+ "mask": [
1434
+ true,
1435
+ true,
1436
+ true,
1437
+ true,
1438
+ true,
1439
+ true,
1440
+ false
1441
+ ]
1442
+ },
1443
+ "proprio": {
1444
+ "mean": [
1445
+ 0.0,
1446
+ 0.0,
1447
+ 0.0,
1448
+ 0.0,
1449
+ 0.0,
1450
+ 0.0,
1451
+ 0.0
1452
+ ],
1453
+ "std": [
1454
+ 0.0,
1455
+ 0.0,
1456
+ 0.0,
1457
+ 0.0,
1458
+ 0.0,
1459
+ 0.0,
1460
+ 0.0
1461
+ ],
1462
+ "max": [
1463
+ 0.0,
1464
+ 0.0,
1465
+ 0.0,
1466
+ 0.0,
1467
+ 0.0,
1468
+ 0.0,
1469
+ 0.0
1470
+ ],
1471
+ "min": [
1472
+ 0.0,
1473
+ 0.0,
1474
+ 0.0,
1475
+ 0.0,
1476
+ 0.0,
1477
+ 0.0,
1478
+ 0.0
1479
+ ],
1480
+ "q01": [
1481
+ 0.0,
1482
+ 0.0,
1483
+ 0.0,
1484
+ 0.0,
1485
+ 0.0,
1486
+ 0.0,
1487
+ 0.0
1488
+ ],
1489
+ "q99": [
1490
+ 0.0,
1491
+ 0.0,
1492
+ 0.0,
1493
+ 0.0,
1494
+ 0.0,
1495
+ 0.0,
1496
+ 0.0
1497
+ ]
1498
+ },
1499
+ "num_transitions": 358234,
1500
+ "num_trajectories": 570
1501
+ },
1502
+ "austin_buds_dataset_converted_externally_to_rlds/0.1.0": {
1503
+ "action": {
1504
+ "mean": [
1505
+ -0.07678329944610596,
1506
+ 0.0036849123425781727,
1507
+ 0.05644941329956055,
1508
+ 0.0,
1509
+ 0.0,
1510
+ 0.0,
1511
+ 0.3510494828224182
1512
+ ],
1513
+ "std": [
1514
+ 0.6367746591567993,
1515
+ 0.3788914680480957,
1516
+ 0.47796377539634705,
1517
+ 0.0,
1518
+ 0.0,
1519
+ 0.0,
1520
+ 0.4772108495235443
1521
+ ],
1522
+ "max": [
1523
+ 1.0,
1524
+ 1.0,
1525
+ 1.0,
1526
+ 0.0,
1527
+ 0.0,
1528
+ 0.0,
1529
+ 1.0
1530
+ ],
1531
+ "min": [
1532
+ -1.0,
1533
+ -1.0,
1534
+ -1.0,
1535
+ 0.0,
1536
+ 0.0,
1537
+ 0.0,
1538
+ 0.0
1539
+ ],
1540
+ "q01": [
1541
+ -1.0,
1542
+ -0.9599999785423279,
1543
+ -0.8714285492897034,
1544
+ 0.0,
1545
+ 0.0,
1546
+ 0.0,
1547
+ 0.0
1548
+ ],
1549
+ "q99": [
1550
+ 1.0,
1551
+ 0.8600000143051147,
1552
+ 1.0,
1553
+ 0.0,
1554
+ 0.0,
1555
+ 0.0,
1556
+ 1.0
1557
+ ],
1558
+ "mask": [
1559
+ true,
1560
+ true,
1561
+ true,
1562
+ true,
1563
+ true,
1564
+ true,
1565
+ false
1566
+ ]
1567
+ },
1568
+ "proprio": {
1569
+ "mean": [
1570
+ 0.0,
1571
+ 0.0,
1572
+ 0.0,
1573
+ 0.0,
1574
+ 0.0,
1575
+ 0.0,
1576
+ 0.0
1577
+ ],
1578
+ "std": [
1579
+ 0.0,
1580
+ 0.0,
1581
+ 0.0,
1582
+ 0.0,
1583
+ 0.0,
1584
+ 0.0,
1585
+ 0.0
1586
+ ],
1587
+ "max": [
1588
+ 0.0,
1589
+ 0.0,
1590
+ 0.0,
1591
+ 0.0,
1592
+ 0.0,
1593
+ 0.0,
1594
+ 0.0
1595
+ ],
1596
+ "min": [
1597
+ 0.0,
1598
+ 0.0,
1599
+ 0.0,
1600
+ 0.0,
1601
+ 0.0,
1602
+ 0.0,
1603
+ 0.0
1604
+ ],
1605
+ "q01": [
1606
+ 0.0,
1607
+ 0.0,
1608
+ 0.0,
1609
+ 0.0,
1610
+ 0.0,
1611
+ 0.0,
1612
+ 0.0
1613
+ ],
1614
+ "q99": [
1615
+ 0.0,
1616
+ 0.0,
1617
+ 0.0,
1618
+ 0.0,
1619
+ 0.0,
1620
+ 0.0,
1621
+ 0.0
1622
+ ]
1623
+ },
1624
+ "num_transitions": 34112,
1625
+ "num_trajectories": 50
1626
+ },
1627
+ "nyu_franka_play_dataset_converted_externally_to_rlds/0.1.0": {
1628
+ "action": {
1629
+ "mean": [
1630
+ 0.0010219910182058811,
1631
+ -0.00012002632865915075,
1632
+ 0.00032894135802052915,
1633
+ 0.0015034276293590665,
1634
+ -0.002198528265580535,
1635
+ -0.0016632305923849344,
1636
+ 0.7230083346366882
1637
+ ],
1638
+ "std": [
1639
+ 0.013274150900542736,
1640
+ 0.013215919025242329,
1641
+ 0.01282210648059845,
1642
+ 0.27324533462524414,
1643
+ 0.05702253058552742,
1644
+ 0.03917279839515686,
1645
+ 0.44753193855285645
1646
+ ],
1647
+ "max": [
1648
+ 0.06424188613891602,
1649
+ 0.07027634978294373,
1650
+ 0.06129661202430725,
1651
+ 6.281067848205566,
1652
+ 0.1967729926109314,
1653
+ 0.26377415657043457,
1654
+ 1.0
1655
+ ],
1656
+ "min": [
1657
+ -0.05952230095863342,
1658
+ -0.07232445478439331,
1659
+ -0.06730806827545166,
1660
+ -6.278434753417969,
1661
+ -0.21479034423828125,
1662
+ -0.3627619743347168,
1663
+ 0.0
1664
+ ],
1665
+ "q01": [
1666
+ -0.03199600875377655,
1667
+ -0.032861671447753905,
1668
+ -0.03368805110454559,
1669
+ -0.12080862045288086,
1670
+ -0.12175218224525451,
1671
+ -0.11370223641395569,
1672
+ 0.0
1673
+ ],
1674
+ "q99": [
1675
+ 0.03101520001888276,
1676
+ 0.0373908892273903,
1677
+ 0.03646374464035038,
1678
+ 0.11764093399047852,
1679
+ 0.1258920183777809,
1680
+ 0.09366151213645942,
1681
+ 1.0
1682
+ ],
1683
+ "mask": [
1684
+ true,
1685
+ true,
1686
+ true,
1687
+ true,
1688
+ true,
1689
+ true,
1690
+ false
1691
+ ]
1692
+ },
1693
+ "proprio": {
1694
+ "mean": [
1695
+ 0.0,
1696
+ 0.0,
1697
+ 0.0,
1698
+ 0.0,
1699
+ 0.0,
1700
+ 0.0,
1701
+ 0.0
1702
+ ],
1703
+ "std": [
1704
+ 0.0,
1705
+ 0.0,
1706
+ 0.0,
1707
+ 0.0,
1708
+ 0.0,
1709
+ 0.0,
1710
+ 0.0
1711
+ ],
1712
+ "max": [
1713
+ 0.0,
1714
+ 0.0,
1715
+ 0.0,
1716
+ 0.0,
1717
+ 0.0,
1718
+ 0.0,
1719
+ 0.0
1720
+ ],
1721
+ "min": [
1722
+ 0.0,
1723
+ 0.0,
1724
+ 0.0,
1725
+ 0.0,
1726
+ 0.0,
1727
+ 0.0,
1728
+ 0.0
1729
+ ],
1730
+ "q01": [
1731
+ 0.0,
1732
+ 0.0,
1733
+ 0.0,
1734
+ 0.0,
1735
+ 0.0,
1736
+ 0.0,
1737
+ 0.0
1738
+ ],
1739
+ "q99": [
1740
+ 0.0,
1741
+ 0.0,
1742
+ 0.0,
1743
+ 0.0,
1744
+ 0.0,
1745
+ 0.0,
1746
+ 0.0
1747
+ ]
1748
+ },
1749
+ "num_transitions": 44875,
1750
+ "num_trajectories": 456
1751
+ },
1752
+ "furniture_bench_dataset_converted_externally_to_rlds/0.1.0": {
1753
+ "action": {
1754
+ "mean": [
1755
+ 0.0001461071806261316,
1756
+ 0.0010830992832779884,
1757
+ 0.0006224963581189513,
1758
+ -0.0033032014034688473,
1759
+ -0.002688060747459531,
1760
+ 0.018242614343762398,
1761
+ 0.48854944109916687
1762
+ ],
1763
+ "std": [
1764
+ 0.016107233241200447,
1765
+ 0.014891570433974266,
1766
+ 0.014014236629009247,
1767
+ 0.05827433615922928,
1768
+ 0.11417083442211151,
1769
+ 0.33479660749435425,
1770
+ 0.4999157190322876
1771
+ ],
1772
+ "max": [
1773
+ 0.10000000149011612,
1774
+ 0.10000000149011612,
1775
+ 0.10000000149011612,
1776
+ 0.8651833534240723,
1777
+ 1.0909736156463623,
1778
+ 2.863185405731201,
1779
+ 1.0
1780
+ ],
1781
+ "min": [
1782
+ -0.10495579987764359,
1783
+ -0.10939455777406693,
1784
+ -0.10000000149011612,
1785
+ -0.971906840801239,
1786
+ -1.0475432872772217,
1787
+ -3.06000018119812,
1788
+ 0.0
1789
+ ],
1790
+ "q01": [
1791
+ -0.053988199681043625,
1792
+ -0.05049169331789017,
1793
+ -0.032499241530895236,
1794
+ -0.1953887003660202,
1795
+ -0.41674559473991396,
1796
+ -0.8886768388748169,
1797
+ 0.0
1798
+ ],
1799
+ "q99": [
1800
+ 0.05414841488003723,
1801
+ 0.04965164884924884,
1802
+ 0.060055799782276154,
1803
+ 0.18231668293476103,
1804
+ 0.39867786407470646,
1805
+ 0.8772023963928218,
1806
+ 1.0
1807
+ ],
1808
+ "mask": [
1809
+ true,
1810
+ true,
1811
+ true,
1812
+ true,
1813
+ true,
1814
+ true,
1815
+ false
1816
+ ]
1817
+ },
1818
+ "proprio": {
1819
+ "mean": [
1820
+ 0.0,
1821
+ 0.0,
1822
+ 0.0,
1823
+ 0.0,
1824
+ 0.0,
1825
+ 0.0,
1826
+ 0.0
1827
+ ],
1828
+ "std": [
1829
+ 0.0,
1830
+ 0.0,
1831
+ 0.0,
1832
+ 0.0,
1833
+ 0.0,
1834
+ 0.0,
1835
+ 0.0
1836
+ ],
1837
+ "max": [
1838
+ 0.0,
1839
+ 0.0,
1840
+ 0.0,
1841
+ 0.0,
1842
+ 0.0,
1843
+ 0.0,
1844
+ 0.0
1845
+ ],
1846
+ "min": [
1847
+ 0.0,
1848
+ 0.0,
1849
+ 0.0,
1850
+ 0.0,
1851
+ 0.0,
1852
+ 0.0,
1853
+ 0.0
1854
+ ],
1855
+ "q01": [
1856
+ 0.0,
1857
+ 0.0,
1858
+ 0.0,
1859
+ 0.0,
1860
+ 0.0,
1861
+ 0.0,
1862
+ 0.0
1863
+ ],
1864
+ "q99": [
1865
+ 0.0,
1866
+ 0.0,
1867
+ 0.0,
1868
+ 0.0,
1869
+ 0.0,
1870
+ 0.0,
1871
+ 0.0
1872
+ ]
1873
+ },
1874
+ "num_transitions": 3948057,
1875
+ "num_trajectories": 5100
1876
+ },
1877
+ "ucsd_kitchen_dataset_converted_externally_to_rlds/0.1.0": {
1878
+ "action": {
1879
+ "mean": [
1880
+ 410.375732421875,
1881
+ 116.9518814086914,
1882
+ 192.35031127929688,
1883
+ -121.22441864013672,
1884
+ -33.84892654418945,
1885
+ 50.016136169433594,
1886
+ 0.741813600063324
1887
+ ],
1888
+ "std": [
1889
+ 122.81488037109375,
1890
+ 108.80094909667969,
1891
+ 130.30345153808594,
1892
+ 116.2820053100586,
1893
+ 27.62191390991211,
1894
+ 41.02091979980469,
1895
+ 0.4376337230205536
1896
+ ],
1897
+ "max": [
1898
+ 678.0,
1899
+ 400.0,
1900
+ 507.0,
1901
+ 180.00001525878906,
1902
+ 6.000013828277588,
1903
+ 116.99998474121094,
1904
+ 1.0
1905
+ ],
1906
+ "min": [
1907
+ 172.0,
1908
+ -166.0,
1909
+ -99.99999237060547,
1910
+ -180.00001525878906,
1911
+ -89.0,
1912
+ -96.00010681152344,
1913
+ 0.0
1914
+ ],
1915
+ "q01": [
1916
+ 200.00001052856445,
1917
+ -102.31004211425781,
1918
+ -94.99993370056153,
1919
+ -180.00001525878906,
1920
+ -88.00001525878906,
1921
+ -38.999977111816406,
1922
+ 0.0
1923
+ ],
1924
+ "q99": [
1925
+ 637.0,
1926
+ 368.30999999999995,
1927
+ 493.0,
1928
+ 180.00001525878906,
1929
+ 0.999983012676239,
1930
+ 105.00001525878906,
1931
+ 1.0
1932
+ ],
1933
+ "mask": [
1934
+ true,
1935
+ true,
1936
+ true,
1937
+ true,
1938
+ true,
1939
+ true,
1940
+ false
1941
+ ]
1942
+ },
1943
+ "proprio": {
1944
+ "mean": [
1945
+ 0.0,
1946
+ 0.0,
1947
+ 0.0,
1948
+ 0.0,
1949
+ 0.0,
1950
+ 0.0,
1951
+ 0.0
1952
+ ],
1953
+ "std": [
1954
+ 0.0,
1955
+ 0.0,
1956
+ 0.0,
1957
+ 0.0,
1958
+ 0.0,
1959
+ 0.0,
1960
+ 0.0
1961
+ ],
1962
+ "max": [
1963
+ 0.0,
1964
+ 0.0,
1965
+ 0.0,
1966
+ 0.0,
1967
+ 0.0,
1968
+ 0.0,
1969
+ 0.0
1970
+ ],
1971
+ "min": [
1972
+ 0.0,
1973
+ 0.0,
1974
+ 0.0,
1975
+ 0.0,
1976
+ 0.0,
1977
+ 0.0,
1978
+ 0.0
1979
+ ],
1980
+ "q01": [
1981
+ 0.0,
1982
+ 0.0,
1983
+ 0.0,
1984
+ 0.0,
1985
+ 0.0,
1986
+ 0.0,
1987
+ 0.0
1988
+ ],
1989
+ "q99": [
1990
+ 0.0,
1991
+ 0.0,
1992
+ 0.0,
1993
+ 0.0,
1994
+ 0.0,
1995
+ 0.0,
1996
+ 0.0
1997
+ ]
1998
+ },
1999
+ "num_transitions": 3970,
2000
+ "num_trajectories": 150
2001
+ },
2002
+ "austin_sailor_dataset_converted_externally_to_rlds/0.1.0": {
2003
+ "action": {
2004
+ "mean": [
2005
+ 0.011825386434793472,
2006
+ 0.0064610871486365795,
2007
+ 0.060236409306526184,
2008
+ 0.0,
2009
+ 0.0,
2010
+ 0.0016465834341943264,
2011
+ 0.5260950326919556
2012
+ ],
2013
+ "std": [
2014
+ 0.46348854899406433,
2015
+ 0.41240164637565613,
2016
+ 0.41186293959617615,
2017
+ 0.0,
2018
+ 0.0,
2019
+ 0.0578608438372612,
2020
+ 0.49893733859062195
2021
+ ],
2022
+ "max": [
2023
+ 1.0,
2024
+ 1.0,
2025
+ 1.0,
2026
+ 0.0,
2027
+ 0.0,
2028
+ 0.375,
2029
+ 1.0
2030
+ ],
2031
+ "min": [
2032
+ -1.0,
2033
+ -1.0,
2034
+ -1.0,
2035
+ 0.0,
2036
+ 0.0,
2037
+ -0.375,
2038
+ 0.0
2039
+ ],
2040
+ "q01": [
2041
+ -1.0,
2042
+ -0.9828571677207947,
2043
+ -0.6000000238418579,
2044
+ 0.0,
2045
+ 0.0,
2046
+ -0.17249999940395355,
2047
+ 0.0
2048
+ ],
2049
+ "q99": [
2050
+ 1.0,
2051
+ 0.9457142949104309,
2052
+ 1.0,
2053
+ 0.0,
2054
+ 0.0,
2055
+ 0.17892856895923615,
2056
+ 1.0
2057
+ ],
2058
+ "mask": [
2059
+ true,
2060
+ true,
2061
+ true,
2062
+ true,
2063
+ true,
2064
+ true,
2065
+ false
2066
+ ]
2067
+ },
2068
+ "proprio": {
2069
+ "mean": [
2070
+ 0.0,
2071
+ 0.0,
2072
+ 0.0,
2073
+ 0.0,
2074
+ 0.0,
2075
+ 0.0,
2076
+ 0.0
2077
+ ],
2078
+ "std": [
2079
+ 0.0,
2080
+ 0.0,
2081
+ 0.0,
2082
+ 0.0,
2083
+ 0.0,
2084
+ 0.0,
2085
+ 0.0
2086
+ ],
2087
+ "max": [
2088
+ 0.0,
2089
+ 0.0,
2090
+ 0.0,
2091
+ 0.0,
2092
+ 0.0,
2093
+ 0.0,
2094
+ 0.0
2095
+ ],
2096
+ "min": [
2097
+ 0.0,
2098
+ 0.0,
2099
+ 0.0,
2100
+ 0.0,
2101
+ 0.0,
2102
+ 0.0,
2103
+ 0.0
2104
+ ],
2105
+ "q01": [
2106
+ 0.0,
2107
+ 0.0,
2108
+ 0.0,
2109
+ 0.0,
2110
+ 0.0,
2111
+ 0.0,
2112
+ 0.0
2113
+ ],
2114
+ "q99": [
2115
+ 0.0,
2116
+ 0.0,
2117
+ 0.0,
2118
+ 0.0,
2119
+ 0.0,
2120
+ 0.0,
2121
+ 0.0
2122
+ ]
2123
+ },
2124
+ "num_transitions": 353094,
2125
+ "num_trajectories": 240
2126
+ },
2127
+ "austin_sirius_dataset_converted_externally_to_rlds/0.1.0": {
2128
+ "action": {
2129
+ "mean": [
2130
+ 0.077476866543293,
2131
+ 0.031955525279045105,
2132
+ 0.04244735836982727,
2133
+ 0.0,
2134
+ 0.0,
2135
+ -0.01603454165160656,
2136
+ 0.43260180950164795
2137
+ ],
2138
+ "std": [
2139
+ 0.3906330168247223,
2140
+ 0.2998153865337372,
2141
+ 0.2782270312309265,
2142
+ 0.0,
2143
+ 0.0,
2144
+ 0.08120641857385635,
2145
+ 0.49528202414512634
2146
+ ],
2147
+ "max": [
2148
+ 1.0002285242080688,
2149
+ 0.960608720779419,
2150
+ 1.105179786682129,
2151
+ 0.0,
2152
+ 0.0,
2153
+ 0.341785728931427,
2154
+ 1.0
2155
+ ],
2156
+ "min": [
2157
+ -1.0183025598526,
2158
+ -0.9800000190734863,
2159
+ -0.9774575233459473,
2160
+ 0.0,
2161
+ 0.0,
2162
+ -0.34607142210006714,
2163
+ 0.0
2164
+ ],
2165
+ "q01": [
2166
+ -0.780905865430832,
2167
+ -0.5667179036140442,
2168
+ -0.5254343223571777,
2169
+ 0.0,
2170
+ 0.0,
2171
+ -0.28495091378688814,
2172
+ 0.0
2173
+ ],
2174
+ "q99": [
2175
+ 0.9569637751579284,
2176
+ 0.6971374487876891,
2177
+ 0.8124888157844541,
2178
+ 0.0,
2179
+ 0.0,
2180
+ 0.1971428543329239,
2181
+ 1.0
2182
+ ],
2183
+ "mask": [
2184
+ true,
2185
+ true,
2186
+ true,
2187
+ true,
2188
+ true,
2189
+ true,
2190
+ false
2191
+ ]
2192
+ },
2193
+ "proprio": {
2194
+ "mean": [
2195
+ 0.0,
2196
+ 0.0,
2197
+ 0.0,
2198
+ 0.0,
2199
+ 0.0,
2200
+ 0.0,
2201
+ 0.0
2202
+ ],
2203
+ "std": [
2204
+ 0.0,
2205
+ 0.0,
2206
+ 0.0,
2207
+ 0.0,
2208
+ 0.0,
2209
+ 0.0,
2210
+ 0.0
2211
+ ],
2212
+ "max": [
2213
+ 0.0,
2214
+ 0.0,
2215
+ 0.0,
2216
+ 0.0,
2217
+ 0.0,
2218
+ 0.0,
2219
+ 0.0
2220
+ ],
2221
+ "min": [
2222
+ 0.0,
2223
+ 0.0,
2224
+ 0.0,
2225
+ 0.0,
2226
+ 0.0,
2227
+ 0.0,
2228
+ 0.0
2229
+ ],
2230
+ "q01": [
2231
+ 0.0,
2232
+ 0.0,
2233
+ 0.0,
2234
+ 0.0,
2235
+ 0.0,
2236
+ 0.0,
2237
+ 0.0
2238
+ ],
2239
+ "q99": [
2240
+ 0.0,
2241
+ 0.0,
2242
+ 0.0,
2243
+ 0.0,
2244
+ 0.0,
2245
+ 0.0,
2246
+ 0.0
2247
+ ]
2248
+ },
2249
+ "num_transitions": 279939,
2250
+ "num_trajectories": 559
2251
+ },
2252
+ "dlr_edan_shared_control_converted_externally_to_rlds/0.1.0": {
2253
+ "action": {
2254
+ "mean": [
2255
+ 0.0066478196531534195,
2256
+ -0.0007657355745323002,
2257
+ 0.006522845011204481,
2258
+ 0.0011679773451760411,
2259
+ -0.006395624950528145,
2260
+ -0.011903021484613419,
2261
+ 0.6985887289047241
2262
+ ],
2263
+ "std": [
2264
+ 0.021393585950136185,
2265
+ 0.018142299726605415,
2266
+ 0.03374377265572548,
2267
+ 0.01743541844189167,
2268
+ 0.03394372761249542,
2269
+ 0.04641878604888916,
2270
+ 0.45885783433914185
2271
+ ],
2272
+ "max": [
2273
+ 0.18991442024707794,
2274
+ 0.0739002525806427,
2275
+ 0.18064819276332855,
2276
+ 0.0866486132144928,
2277
+ 0.13464981317520142,
2278
+ 0.16910280287265778,
2279
+ 1.0
2280
+ ],
2281
+ "min": [
2282
+ -0.10054297000169754,
2283
+ -0.08427435159683228,
2284
+ -0.13533438742160797,
2285
+ -0.17556548118591309,
2286
+ -0.18485672771930695,
2287
+ -0.2680685818195343,
2288
+ 0.0
2289
+ ],
2290
+ "q01": [
2291
+ -0.02987122368067503,
2292
+ -0.06013262912631035,
2293
+ -0.08286409199237824,
2294
+ -0.05924444157630205,
2295
+ -0.15986866518855095,
2296
+ -0.15636983573436739,
2297
+ 0.0
2298
+ ],
2299
+ "q99": [
2300
+ 0.08832092039287087,
2301
+ 0.042126184627413736,
2302
+ 0.11311905644834042,
2303
+ 0.0643695573508739,
2304
+ 0.03941855944693088,
2305
+ 0.156646853685379,
2306
+ 1.0
2307
+ ],
2308
+ "mask": [
2309
+ true,
2310
+ true,
2311
+ true,
2312
+ true,
2313
+ true,
2314
+ true,
2315
+ false
2316
+ ]
2317
+ },
2318
+ "proprio": {
2319
+ "mean": [
2320
+ 0.0,
2321
+ 0.0,
2322
+ 0.0,
2323
+ 0.0,
2324
+ 0.0,
2325
+ 0.0,
2326
+ 0.0
2327
+ ],
2328
+ "std": [
2329
+ 0.0,
2330
+ 0.0,
2331
+ 0.0,
2332
+ 0.0,
2333
+ 0.0,
2334
+ 0.0,
2335
+ 0.0
2336
+ ],
2337
+ "max": [
2338
+ 0.0,
2339
+ 0.0,
2340
+ 0.0,
2341
+ 0.0,
2342
+ 0.0,
2343
+ 0.0,
2344
+ 0.0
2345
+ ],
2346
+ "min": [
2347
+ 0.0,
2348
+ 0.0,
2349
+ 0.0,
2350
+ 0.0,
2351
+ 0.0,
2352
+ 0.0,
2353
+ 0.0
2354
+ ],
2355
+ "q01": [
2356
+ 0.0,
2357
+ 0.0,
2358
+ 0.0,
2359
+ 0.0,
2360
+ 0.0,
2361
+ 0.0,
2362
+ 0.0
2363
+ ],
2364
+ "q99": [
2365
+ 0.0,
2366
+ 0.0,
2367
+ 0.0,
2368
+ 0.0,
2369
+ 0.0,
2370
+ 0.0,
2371
+ 0.0
2372
+ ]
2373
+ },
2374
+ "num_transitions": 8928,
2375
+ "num_trajectories": 104
2376
+ },
2377
+ "iamlab_cmu_pickup_insert_converted_externally_to_rlds/0.1.0": {
2378
+ "action": {
2379
+ "mean": [
2380
+ 0.5274373292922974,
2381
+ 0.028582017868757248,
2382
+ 0.18712472915649414,
2383
+ 1.2339569330215454,
2384
+ 0.03226622939109802,
2385
+ -1.4199472665786743,
2386
+ 0.5550631880760193
2387
+ ],
2388
+ "std": [
2389
+ 0.08108346909284592,
2390
+ 0.1116756722331047,
2391
+ 0.07747555524110794,
2392
+ 2.8737244606018066,
2393
+ 0.02774704433977604,
2394
+ 2.7678685188293457,
2395
+ 0.4969509243965149
2396
+ ],
2397
+ "max": [
2398
+ 0.6634981632232666,
2399
+ 0.23428471386432648,
2400
+ 0.4308285415172577,
2401
+ 3.1415927410125732,
2402
+ 0.13647015392780304,
2403
+ 3.141592502593994,
2404
+ 1.0
2405
+ ],
2406
+ "min": [
2407
+ 0.3071657121181488,
2408
+ -0.29754969477653503,
2409
+ 0.06578229367733002,
2410
+ -3.1415927410125732,
2411
+ -0.04584203287959099,
2412
+ -3.141592502593994,
2413
+ 0.0
2414
+ ],
2415
+ "q01": [
2416
+ 0.3148897051811218,
2417
+ -0.20317550599575043,
2418
+ 0.06785467118024827,
2419
+ -3.140952730178833,
2420
+ -0.029743434861302376,
2421
+ -3.141091251373291,
2422
+ 0.0
2423
+ ],
2424
+ "q99": [
2425
+ 0.6472805738449097,
2426
+ 0.20846802592277527,
2427
+ 0.36855655312538155,
2428
+ 3.1409926891326903,
2429
+ 0.11424950212240226,
2430
+ 3.1410969257354737,
2431
+ 1.0
2432
+ ],
2433
+ "mask": [
2434
+ true,
2435
+ true,
2436
+ true,
2437
+ true,
2438
+ true,
2439
+ true,
2440
+ false
2441
+ ]
2442
+ },
2443
+ "proprio": {
2444
+ "mean": [
2445
+ 0.0,
2446
+ 0.0,
2447
+ 0.0,
2448
+ 0.0,
2449
+ 0.0,
2450
+ 0.0,
2451
+ 0.0
2452
+ ],
2453
+ "std": [
2454
+ 0.0,
2455
+ 0.0,
2456
+ 0.0,
2457
+ 0.0,
2458
+ 0.0,
2459
+ 0.0,
2460
+ 0.0
2461
+ ],
2462
+ "max": [
2463
+ 0.0,
2464
+ 0.0,
2465
+ 0.0,
2466
+ 0.0,
2467
+ 0.0,
2468
+ 0.0,
2469
+ 0.0
2470
+ ],
2471
+ "min": [
2472
+ 0.0,
2473
+ 0.0,
2474
+ 0.0,
2475
+ 0.0,
2476
+ 0.0,
2477
+ 0.0,
2478
+ 0.0
2479
+ ],
2480
+ "q01": [
2481
+ 0.0,
2482
+ 0.0,
2483
+ 0.0,
2484
+ 0.0,
2485
+ 0.0,
2486
+ 0.0,
2487
+ 0.0
2488
+ ],
2489
+ "q99": [
2490
+ 0.0,
2491
+ 0.0,
2492
+ 0.0,
2493
+ 0.0,
2494
+ 0.0,
2495
+ 0.0,
2496
+ 0.0
2497
+ ]
2498
+ },
2499
+ "num_transitions": 146241,
2500
+ "num_trajectories": 631
2501
+ },
2502
+ "utaustin_mutex/0.1.0": {
2503
+ "action": {
2504
+ "mean": [
2505
+ 0.06176406517624855,
2506
+ -0.005005490034818649,
2507
+ 0.10216782987117767,
2508
+ -0.03314131125807762,
2509
+ 0.013895022682845592,
2510
+ -0.011317633092403412,
2511
+ 0.5038976669311523
2512
+ ],
2513
+ "std": [
2514
+ 0.187501460313797,
2515
+ 0.4468473196029663,
2516
+ 0.3792876601219177,
2517
+ 0.14097853004932404,
2518
+ 0.06453699618577957,
2519
+ 0.11765265464782715,
2520
+ 0.501045286655426
2521
+ ],
2522
+ "max": [
2523
+ 1.0,
2524
+ 1.0,
2525
+ 1.0,
2526
+ 0.375,
2527
+ 0.375,
2528
+ 0.375,
2529
+ 1.0
2530
+ ],
2531
+ "min": [
2532
+ -1.0,
2533
+ -1.0,
2534
+ -1.0,
2535
+ -0.375,
2536
+ -0.375,
2537
+ -0.375,
2538
+ 0.0
2539
+ ],
2540
+ "q01": [
2541
+ -0.4285714328289032,
2542
+ -0.9800000190734863,
2543
+ -0.5571428537368774,
2544
+ -0.375,
2545
+ -0.15642857551574707,
2546
+ -0.335357129573822,
2547
+ 0.0
2548
+ ],
2549
+ "q99": [
2550
+ 0.5914285778999329,
2551
+ 0.9714285731315613,
2552
+ 1.0,
2553
+ 0.3278571367263794,
2554
+ 0.207857146859169,
2555
+ 0.25607141852378845,
2556
+ 1.0
2557
+ ],
2558
+ "mask": [
2559
+ true,
2560
+ true,
2561
+ true,
2562
+ true,
2563
+ true,
2564
+ true,
2565
+ false
2566
+ ]
2567
+ },
2568
+ "proprio": {
2569
+ "mean": [
2570
+ 0.0,
2571
+ 0.0,
2572
+ 0.0,
2573
+ 0.0,
2574
+ 0.0,
2575
+ 0.0,
2576
+ 0.0
2577
+ ],
2578
+ "std": [
2579
+ 0.0,
2580
+ 0.0,
2581
+ 0.0,
2582
+ 0.0,
2583
+ 0.0,
2584
+ 0.0,
2585
+ 0.0
2586
+ ],
2587
+ "max": [
2588
+ 0.0,
2589
+ 0.0,
2590
+ 0.0,
2591
+ 0.0,
2592
+ 0.0,
2593
+ 0.0,
2594
+ 0.0
2595
+ ],
2596
+ "min": [
2597
+ 0.0,
2598
+ 0.0,
2599
+ 0.0,
2600
+ 0.0,
2601
+ 0.0,
2602
+ 0.0,
2603
+ 0.0
2604
+ ],
2605
+ "q01": [
2606
+ 0.0,
2607
+ 0.0,
2608
+ 0.0,
2609
+ 0.0,
2610
+ 0.0,
2611
+ 0.0,
2612
+ 0.0
2613
+ ],
2614
+ "q99": [
2615
+ 0.0,
2616
+ 0.0,
2617
+ 0.0,
2618
+ 0.0,
2619
+ 0.0,
2620
+ 0.0,
2621
+ 0.0
2622
+ ]
2623
+ },
2624
+ "num_transitions": 361883,
2625
+ "num_trajectories": 1500
2626
+ },
2627
+ "berkeley_fanuc_manipulation/0.1.0": {
2628
+ "action": {
2629
+ "mean": [
2630
+ 0.0007744057802483439,
2631
+ -0.00031240080716088414,
2632
+ -0.0015001941937953234,
2633
+ -0.0007515158504247665,
2634
+ -0.00015832878125365824,
2635
+ 0.00014327642566058785,
2636
+ 0.699295699596405
2637
+ ],
2638
+ "std": [
2639
+ 0.0034070133697241545,
2640
+ 0.00499219074845314,
2641
+ 0.005344326142221689,
2642
+ 0.007599010597914457,
2643
+ 0.004081932827830315,
2644
+ 0.008568963967263699,
2645
+ 0.45868709683418274
2646
+ ],
2647
+ "max": [
2648
+ 0.009999999776482582,
2649
+ 0.009999999776482582,
2650
+ 0.009999999776482582,
2651
+ 0.03490658476948738,
2652
+ 0.03490658476948738,
2653
+ 0.03490658476948738,
2654
+ 1.0
2655
+ ],
2656
+ "min": [
2657
+ -0.009999999776482582,
2658
+ -0.009999999776482582,
2659
+ -0.009999999776482582,
2660
+ -0.03490658476948738,
2661
+ -0.03490658476948738,
2662
+ -0.03490658476948738,
2663
+ 0.0
2664
+ ],
2665
+ "q01": [
2666
+ -0.009999999776482582,
2667
+ -0.009999999776482582,
2668
+ -0.009999999776482582,
2669
+ -0.03490658476948738,
2670
+ 0.0,
2671
+ -0.03490658476948738,
2672
+ 0.0
2673
+ ],
2674
+ "q99": [
2675
+ 0.009999999776482582,
2676
+ 0.009999999776482582,
2677
+ 0.009999999776482582,
2678
+ 0.03490658476948738,
2679
+ 0.0,
2680
+ 0.03490658476948738,
2681
+ 1.0
2682
+ ],
2683
+ "mask": [
2684
+ true,
2685
+ true,
2686
+ true,
2687
+ true,
2688
+ true,
2689
+ true,
2690
+ false
2691
+ ]
2692
+ },
2693
+ "proprio": {
2694
+ "mean": [
2695
+ 0.0,
2696
+ 0.0,
2697
+ 0.0,
2698
+ 0.0,
2699
+ 0.0,
2700
+ 0.0,
2701
+ 0.0
2702
+ ],
2703
+ "std": [
2704
+ 0.0,
2705
+ 0.0,
2706
+ 0.0,
2707
+ 0.0,
2708
+ 0.0,
2709
+ 0.0,
2710
+ 0.0
2711
+ ],
2712
+ "max": [
2713
+ 0.0,
2714
+ 0.0,
2715
+ 0.0,
2716
+ 0.0,
2717
+ 0.0,
2718
+ 0.0,
2719
+ 0.0
2720
+ ],
2721
+ "min": [
2722
+ 0.0,
2723
+ 0.0,
2724
+ 0.0,
2725
+ 0.0,
2726
+ 0.0,
2727
+ 0.0,
2728
+ 0.0
2729
+ ],
2730
+ "q01": [
2731
+ 0.0,
2732
+ 0.0,
2733
+ 0.0,
2734
+ 0.0,
2735
+ 0.0,
2736
+ 0.0,
2737
+ 0.0
2738
+ ],
2739
+ "q99": [
2740
+ 0.0,
2741
+ 0.0,
2742
+ 0.0,
2743
+ 0.0,
2744
+ 0.0,
2745
+ 0.0,
2746
+ 0.0
2747
+ ]
2748
+ },
2749
+ "num_transitions": 62613,
2750
+ "num_trajectories": 415
2751
+ },
2752
+ "cmu_stretch/0.1.0": {
2753
+ "action": {
2754
+ "mean": [
2755
+ 0.0003630445571616292,
2756
+ 0.0,
2757
+ 0.0016466928645968437,
2758
+ 0.0,
2759
+ 0.0,
2760
+ 0.0,
2761
+ 0.3987048268318176
2762
+ ],
2763
+ "std": [
2764
+ 0.004081855062395334,
2765
+ 0.0,
2766
+ 0.003774340031668544,
2767
+ 0.0,
2768
+ 0.0,
2769
+ 0.0,
2770
+ 0.489638090133667
2771
+ ],
2772
+ "max": [
2773
+ 0.02338407188653946,
2774
+ 0.0,
2775
+ 0.023404927924275398,
2776
+ 0.0,
2777
+ 0.0,
2778
+ 0.0,
2779
+ 1.0
2780
+ ],
2781
+ "min": [
2782
+ -0.019353797659277916,
2783
+ 0.0,
2784
+ -0.02019215188920498,
2785
+ 0.0,
2786
+ 0.0,
2787
+ 0.0,
2788
+ 0.0
2789
+ ],
2790
+ "q01": [
2791
+ -0.011175686959177256,
2792
+ 0.0,
2793
+ -0.0032206363626755773,
2794
+ 0.0,
2795
+ 0.0,
2796
+ 0.0,
2797
+ 0.0
2798
+ ],
2799
+ "q99": [
2800
+ 0.014501785952597848,
2801
+ 0.0,
2802
+ 0.015056106168776728,
2803
+ 0.0,
2804
+ 0.0,
2805
+ 0.0,
2806
+ 1.0
2807
+ ],
2808
+ "mask": [
2809
+ true,
2810
+ true,
2811
+ true,
2812
+ true,
2813
+ true,
2814
+ true,
2815
+ false
2816
+ ]
2817
+ },
2818
+ "proprio": {
2819
+ "mean": [
2820
+ 0.0,
2821
+ 0.0,
2822
+ 0.0,
2823
+ 0.0,
2824
+ 0.0,
2825
+ 0.0,
2826
+ 0.0
2827
+ ],
2828
+ "std": [
2829
+ 0.0,
2830
+ 0.0,
2831
+ 0.0,
2832
+ 0.0,
2833
+ 0.0,
2834
+ 0.0,
2835
+ 0.0
2836
+ ],
2837
+ "max": [
2838
+ 0.0,
2839
+ 0.0,
2840
+ 0.0,
2841
+ 0.0,
2842
+ 0.0,
2843
+ 0.0,
2844
+ 0.0
2845
+ ],
2846
+ "min": [
2847
+ 0.0,
2848
+ 0.0,
2849
+ 0.0,
2850
+ 0.0,
2851
+ 0.0,
2852
+ 0.0,
2853
+ 0.0
2854
+ ],
2855
+ "q01": [
2856
+ 0.0,
2857
+ 0.0,
2858
+ 0.0,
2859
+ 0.0,
2860
+ 0.0,
2861
+ 0.0,
2862
+ 0.0
2863
+ ],
2864
+ "q99": [
2865
+ 0.0,
2866
+ 0.0,
2867
+ 0.0,
2868
+ 0.0,
2869
+ 0.0,
2870
+ 0.0,
2871
+ 0.0
2872
+ ]
2873
+ },
2874
+ "num_transitions": 25016,
2875
+ "num_trajectories": 135
2876
+ },
2877
+ "bc_z/0.1.0": {
2878
+ "action": {
2879
+ "mean": [
2880
+ -0.009958645328879356,
2881
+ 0.0008958434336818755,
2882
+ 0.00499522453173995,
2883
+ 0.000297540333122015,
2884
+ -0.008734511211514473,
2885
+ -0.03068969026207924,
2886
+ 0.8344562649726868
2887
+ ],
2888
+ "std": [
2889
+ 0.030533093959093094,
2890
+ 0.0231416504830122,
2891
+ 0.020642085000872612,
2892
+ 0.04156165570020676,
2893
+ 0.04643021523952484,
2894
+ 0.07697845250368118,
2895
+ 0.36111101508140564
2896
+ ],
2897
+ "max": [
2898
+ 0.2165454924106598,
2899
+ 0.1251407265663147,
2900
+ 0.10772687941789627,
2901
+ 0.33544227480888367,
2902
+ 0.28117990493774414,
2903
+ 0.40614867210388184,
2904
+ 1.0
2905
+ ],
2906
+ "min": [
2907
+ -0.1677047461271286,
2908
+ -0.14630407094955444,
2909
+ -0.10066790133714676,
2910
+ -0.29421567916870117,
2911
+ -0.32101404666900635,
2912
+ -0.4635624885559082,
2913
+ 0.0
2914
+ ],
2915
+ "q01": [
2916
+ -0.09220654994249344,
2917
+ -0.06456145539879798,
2918
+ -0.049121275544166565,
2919
+ -0.11594625547528267,
2920
+ -0.14152548640966414,
2921
+ -0.2251061636209488,
2922
+ 0.0
2923
+ ],
2924
+ "q99": [
2925
+ 0.07628866866230968,
2926
+ 0.058019736707210584,
2927
+ 0.052540797740221024,
2928
+ 0.11740604028105736,
2929
+ 0.11703975558280955,
2930
+ 0.16729306846857078,
2931
+ 1.0
2932
+ ],
2933
+ "mask": [
2934
+ true,
2935
+ true,
2936
+ true,
2937
+ true,
2938
+ true,
2939
+ true,
2940
+ false
2941
+ ]
2942
+ },
2943
+ "proprio": {
2944
+ "mean": [
2945
+ 0.0,
2946
+ 0.0,
2947
+ 0.0,
2948
+ 0.0,
2949
+ 0.0,
2950
+ 0.0,
2951
+ 0.0
2952
+ ],
2953
+ "std": [
2954
+ 0.0,
2955
+ 0.0,
2956
+ 0.0,
2957
+ 0.0,
2958
+ 0.0,
2959
+ 0.0,
2960
+ 0.0
2961
+ ],
2962
+ "max": [
2963
+ 0.0,
2964
+ 0.0,
2965
+ 0.0,
2966
+ 0.0,
2967
+ 0.0,
2968
+ 0.0,
2969
+ 0.0
2970
+ ],
2971
+ "min": [
2972
+ 0.0,
2973
+ 0.0,
2974
+ 0.0,
2975
+ 0.0,
2976
+ 0.0,
2977
+ 0.0,
2978
+ 0.0
2979
+ ],
2980
+ "q01": [
2981
+ 0.0,
2982
+ 0.0,
2983
+ 0.0,
2984
+ 0.0,
2985
+ 0.0,
2986
+ 0.0,
2987
+ 0.0
2988
+ ],
2989
+ "q99": [
2990
+ 0.0,
2991
+ 0.0,
2992
+ 0.0,
2993
+ 0.0,
2994
+ 0.0,
2995
+ 0.0,
2996
+ 0.0
2997
+ ]
2998
+ },
2999
+ "num_transitions": 6015535,
3000
+ "num_trajectories": 43264
3001
+ },
3002
+ "fmb_dataset/1.0.0": {
3003
+ "action": {
3004
+ "mean": [
3005
+ 0.05902976542711258,
3006
+ -0.06476633995771408,
3007
+ -0.09787469357252121,
3008
+ 0.004325387068092823,
3009
+ 0.00028963759541511536,
3010
+ -0.04457257315516472,
3011
+ 0.7336440086364746
3012
+ ],
3013
+ "std": [
3014
+ 0.28809186816215515,
3015
+ 0.2820416986942291,
3016
+ 0.4626740515232086,
3017
+ 0.3266514539718628,
3018
+ 0.10842999070882797,
3019
+ 0.34400978684425354,
3020
+ 0.4435289800167084
3021
+ ],
3022
+ "max": [
3023
+ 1.399999976158142,
3024
+ 1.0,
3025
+ 1.399999976158142,
3026
+ 1.0,
3027
+ 1.0,
3028
+ 1.0,
3029
+ 1.0
3030
+ ],
3031
+ "min": [
3032
+ -1.399999976158142,
3033
+ -1.399999976158142,
3034
+ -1.0,
3035
+ -1.0,
3036
+ -1.0,
3037
+ -1.0,
3038
+ 0.0
3039
+ ],
3040
+ "q01": [
3041
+ -0.8257142901420593,
3042
+ -1.399999976158142,
3043
+ -1.0,
3044
+ -1.0,
3045
+ -0.3028571307659149,
3046
+ -1.0,
3047
+ 0.0
3048
+ ],
3049
+ "q99": [
3050
+ 1.0,
3051
+ 0.5257142782211304,
3052
+ 1.0,
3053
+ 1.0,
3054
+ 0.3400000035762787,
3055
+ 1.0,
3056
+ 1.0
3057
+ ],
3058
+ "mask": [
3059
+ true,
3060
+ true,
3061
+ true,
3062
+ true,
3063
+ true,
3064
+ true,
3065
+ false
3066
+ ]
3067
+ },
3068
+ "proprio": {
3069
+ "mean": [
3070
+ 0.0,
3071
+ 0.0,
3072
+ 0.0,
3073
+ 0.0,
3074
+ 0.0,
3075
+ 0.0,
3076
+ 0.0
3077
+ ],
3078
+ "std": [
3079
+ 0.0,
3080
+ 0.0,
3081
+ 0.0,
3082
+ 0.0,
3083
+ 0.0,
3084
+ 0.0,
3085
+ 0.0
3086
+ ],
3087
+ "max": [
3088
+ 0.0,
3089
+ 0.0,
3090
+ 0.0,
3091
+ 0.0,
3092
+ 0.0,
3093
+ 0.0,
3094
+ 0.0
3095
+ ],
3096
+ "min": [
3097
+ 0.0,
3098
+ 0.0,
3099
+ 0.0,
3100
+ 0.0,
3101
+ 0.0,
3102
+ 0.0,
3103
+ 0.0
3104
+ ],
3105
+ "q01": [
3106
+ 0.0,
3107
+ 0.0,
3108
+ 0.0,
3109
+ 0.0,
3110
+ 0.0,
3111
+ 0.0,
3112
+ 0.0
3113
+ ],
3114
+ "q99": [
3115
+ 0.0,
3116
+ 0.0,
3117
+ 0.0,
3118
+ 0.0,
3119
+ 0.0,
3120
+ 0.0,
3121
+ 0.0
3122
+ ]
3123
+ },
3124
+ "num_transitions": 1137459,
3125
+ "num_trajectories": 8612
3126
+ },
3127
+ "dobbe/0.0.1": {
3128
+ "action": {
3129
+ "mean": [
3130
+ -0.00011206958151888102,
3131
+ 0.0011229681549593806,
3132
+ -0.00010193959315074608,
3133
+ -7.37128357286565e-05,
3134
+ -0.0006753374473191798,
3135
+ -5.664441778208129e-05,
3136
+ 0.6318688988685608
3137
+ ],
3138
+ "std": [
3139
+ 0.042660679668188095,
3140
+ 0.04428431764245033,
3141
+ 0.12224890291690826,
3142
+ 0.005388470832258463,
3143
+ 0.011246936395764351,
3144
+ 0.006288259290158749,
3145
+ 0.3973240256309509
3146
+ ],
3147
+ "max": [
3148
+ 38.590423583984375,
3149
+ 17.932697296142578,
3150
+ 4.843764305114746,
3151
+ 1.4372116327285767,
3152
+ 0.4340403974056244,
3153
+ 1.2057193517684937,
3154
+ 0.9998947381973267
3155
+ ],
3156
+ "min": [
3157
+ -5.700923442840576,
3158
+ -21.605947494506836,
3159
+ -123.72489929199219,
3160
+ -1.7229845523834229,
3161
+ -0.4998578727245331,
3162
+ -0.8867913484573364,
3163
+ 1.4196479014572105e-06
3164
+ ],
3165
+ "q01": [
3166
+ -0.01119564864784479,
3167
+ -0.014266146533191203,
3168
+ -0.0071747214533388615,
3169
+ -0.009444301575422287,
3170
+ -0.03990109823644161,
3171
+ -0.017422311007976532,
3172
+ 4.003279136668425e-05
3173
+ ],
3174
+ "q99": [
3175
+ 0.01015154086053368,
3176
+ 0.017181577533483497,
3177
+ 0.007216989761218411,
3178
+ 0.010380979906767595,
3179
+ 0.03556173853576176,
3180
+ 0.018032474815845446,
3181
+ 0.9982578039169312
3182
+ ],
3183
+ "mask": [
3184
+ true,
3185
+ true,
3186
+ true,
3187
+ true,
3188
+ true,
3189
+ true,
3190
+ false
3191
+ ]
3192
+ },
3193
+ "proprio": {
3194
+ "mean": [
3195
+ 0.0,
3196
+ 0.0,
3197
+ 0.0,
3198
+ 0.0,
3199
+ 0.0,
3200
+ 0.0,
3201
+ 0.0
3202
+ ],
3203
+ "std": [
3204
+ 0.0,
3205
+ 0.0,
3206
+ 0.0,
3207
+ 0.0,
3208
+ 0.0,
3209
+ 0.0,
3210
+ 0.0
3211
+ ],
3212
+ "max": [
3213
+ 0.0,
3214
+ 0.0,
3215
+ 0.0,
3216
+ 0.0,
3217
+ 0.0,
3218
+ 0.0,
3219
+ 0.0
3220
+ ],
3221
+ "min": [
3222
+ 0.0,
3223
+ 0.0,
3224
+ 0.0,
3225
+ 0.0,
3226
+ 0.0,
3227
+ 0.0,
3228
+ 0.0
3229
+ ],
3230
+ "q01": [
3231
+ 0.0,
3232
+ 0.0,
3233
+ 0.0,
3234
+ 0.0,
3235
+ 0.0,
3236
+ 0.0,
3237
+ 0.0
3238
+ ],
3239
+ "q99": [
3240
+ 0.0,
3241
+ 0.0,
3242
+ 0.0,
3243
+ 0.0,
3244
+ 0.0,
3245
+ 0.0,
3246
+ 0.0
3247
+ ]
3248
+ },
3249
+ "num_transitions": 1139911,
3250
+ "num_trajectories": 5208
3251
+ },
3252
+ "droid/1.0.0": {
3253
+ "action": {
3254
+ "mean": [
3255
+ 0.027425529435276985,
3256
+ -0.0026820411439985037,
3257
+ 0.01595238223671913,
3258
+ 0.0035501928068697453,
3259
+ -0.030532635748386383,
3260
+ -0.006685464642941952,
3261
+ 0.5860344171524048
3262
+ ],
3263
+ "std": [
3264
+ 0.25387412309646606,
3265
+ 0.18426834046840668,
3266
+ 0.22532416880130768,
3267
+ 0.21757009625434875,
3268
+ 0.22572560608386993,
3269
+ 0.2867794930934906,
3270
+ 0.4287726879119873
3271
+ ],
3272
+ "max": [
3273
+ 0.9999998211860657,
3274
+ 0.999991774559021,
3275
+ 0.9999973177909851,
3276
+ 0.9999874830245972,
3277
+ 0.9999954104423523,
3278
+ 0.9999998807907104,
3279
+ 1.0
3280
+ ],
3281
+ "min": [
3282
+ -0.9999999403953552,
3283
+ -0.9999951124191284,
3284
+ -0.9999960660934448,
3285
+ -0.9999980330467224,
3286
+ -0.9999982118606567,
3287
+ -0.9999998807907104,
3288
+ 0.0
3289
+ ],
3290
+ "q01": [
3291
+ -0.7776297926902771,
3292
+ -0.5803514122962952,
3293
+ -0.5795090794563293,
3294
+ -0.6464047729969025,
3295
+ -0.7041108310222626,
3296
+ -0.8895104378461838,
3297
+ 0.0
3298
+ ],
3299
+ "q99": [
3300
+ 0.7597932070493698,
3301
+ 0.5726242214441299,
3302
+ 0.7351000607013702,
3303
+ 0.6705610305070877,
3304
+ 0.6464948207139969,
3305
+ 0.8897542208433151,
3306
+ 1.0
3307
+ ],
3308
+ "mask": [
3309
+ true,
3310
+ true,
3311
+ true,
3312
+ true,
3313
+ true,
3314
+ true,
3315
+ false
3316
+ ]
3317
+ },
3318
+ "proprio": {
3319
+ "mean": [
3320
+ 0.0,
3321
+ 0.0,
3322
+ 0.0,
3323
+ 0.0,
3324
+ 0.0,
3325
+ 0.0,
3326
+ 0.0
3327
+ ],
3328
+ "std": [
3329
+ 0.0,
3330
+ 0.0,
3331
+ 0.0,
3332
+ 0.0,
3333
+ 0.0,
3334
+ 0.0,
3335
+ 0.0
3336
+ ],
3337
+ "max": [
3338
+ 0.0,
3339
+ 0.0,
3340
+ 0.0,
3341
+ 0.0,
3342
+ 0.0,
3343
+ 0.0,
3344
+ 0.0
3345
+ ],
3346
+ "min": [
3347
+ 0.0,
3348
+ 0.0,
3349
+ 0.0,
3350
+ 0.0,
3351
+ 0.0,
3352
+ 0.0,
3353
+ 0.0
3354
+ ],
3355
+ "q01": [
3356
+ 0.0,
3357
+ 0.0,
3358
+ 0.0,
3359
+ 0.0,
3360
+ 0.0,
3361
+ 0.0,
3362
+ 0.0
3363
+ ],
3364
+ "q99": [
3365
+ 0.0,
3366
+ 0.0,
3367
+ 0.0,
3368
+ 0.0,
3369
+ 0.0,
3370
+ 0.0,
3371
+ 0.0
3372
+ ]
3373
+ },
3374
+ "num_transitions": 27044326,
3375
+ "num_trajectories": 92233
3376
+ },
3377
+ "rh20t_rlds/1.0.0": {
3378
+ "action": {
3379
+ "mean": [
3380
+ -5.332157638779582e+28,
3381
+ -1.5128827327837974e+29,
3382
+ -1.832736619079747e+28,
3383
+ 0.5735913515090942,
3384
+ -0.00847744569182396,
3385
+ -0.5566052198410034,
3386
+ 0.3186892569065094
3387
+ ],
3388
+ "std": [
3389
+ Infinity,
3390
+ Infinity,
3391
+ Infinity,
3392
+ 2.2581026554107666,
3393
+ 0.1548534482717514,
3394
+ 2.2581026554107666,
3395
+ 0.39917993545532227
3396
+ ],
3397
+ "max": [
3398
+ 7.582831568163597e+35,
3399
+ 7.557172735451728e+35,
3400
+ 2.2717764477020827e+27,
3401
+ 3.1415927410125732,
3402
+ 1.5116956233978271,
3403
+ 3.1415927410125732,
3404
+ 1.0
3405
+ ],
3406
+ "min": [
3407
+ -3.5543094244408723e+36,
3408
+ -8.723098019507117e+36,
3409
+ -9.648338287048974e+35,
3410
+ -3.1415927410125732,
3411
+ -1.5062522888183594,
3412
+ -3.1415927410125732,
3413
+ 0.0
3414
+ ],
3415
+ "q01": [
3416
+ 0.36028257966041566,
3417
+ -0.272584410905838,
3418
+ 0.005985925104469062,
3419
+ -3.1411514282226562,
3420
+ -0.5925320792198181,
3421
+ -3.1415159702301025,
3422
+ 0.0
3423
+ ],
3424
+ "q99": [
3425
+ 0.7534684538841248,
3426
+ 0.31738221645355225,
3427
+ 0.33061375379562374,
3428
+ 3.141425132751465,
3429
+ 0.47507260441780086,
3430
+ 3.141479730606079,
3431
+ 1.0
3432
+ ],
3433
+ "mask": [
3434
+ true,
3435
+ true,
3436
+ true,
3437
+ true,
3438
+ true,
3439
+ true,
3440
+ false
3441
+ ]
3442
+ },
3443
+ "proprio": {
3444
+ "mean": [
3445
+ 0.0,
3446
+ 0.0,
3447
+ 0.0,
3448
+ 0.0,
3449
+ 0.0,
3450
+ 0.0,
3451
+ 0.0
3452
+ ],
3453
+ "std": [
3454
+ 0.0,
3455
+ 0.0,
3456
+ 0.0,
3457
+ 0.0,
3458
+ 0.0,
3459
+ 0.0,
3460
+ 0.0
3461
+ ],
3462
+ "max": [
3463
+ 0.0,
3464
+ 0.0,
3465
+ 0.0,
3466
+ 0.0,
3467
+ 0.0,
3468
+ 0.0,
3469
+ 0.0
3470
+ ],
3471
+ "min": [
3472
+ 0.0,
3473
+ 0.0,
3474
+ 0.0,
3475
+ 0.0,
3476
+ 0.0,
3477
+ 0.0,
3478
+ 0.0
3479
+ ],
3480
+ "q01": [
3481
+ 0.0,
3482
+ 0.0,
3483
+ 0.0,
3484
+ 0.0,
3485
+ 0.0,
3486
+ 0.0,
3487
+ 0.0
3488
+ ],
3489
+ "q99": [
3490
+ 0.0,
3491
+ 0.0,
3492
+ 0.0,
3493
+ 0.0,
3494
+ 0.0,
3495
+ 0.0,
3496
+ 0.0
3497
+ ]
3498
+ },
3499
+ "num_transitions": 52644433,
3500
+ "num_trajectories": 104392
3501
+ }
3502
+ }
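Each block above stores per-dataset action statistics (mean/std/min/max, 1st/99th percentiles, a per-dimension mask, and transition/trajectory counts). Statistics in this shape are commonly used to clip raw actions to the [q01, q99] range and rescale them to [-1, 1], leaving dimensions whose mask is false (the last, gripper-like one in every entry above) untouched. The helper below is a minimal sketch of that pattern, not code from this commit; the file name and dataset key are illustrative only.

```python
import json
import numpy as np

def normalize_with_q_bounds(action, stats, eps=1e-8):
    """Clip to [q01, q99] and rescale to [-1, 1]; dims with mask=False pass through unchanged."""
    q01 = np.asarray(stats["q01"], dtype=np.float64)
    q99 = np.asarray(stats["q99"], dtype=np.float64)
    mask = np.asarray(stats["mask"], dtype=bool)
    # Note: degenerate dimensions (q01 == q99) collapse to -1 in this simple sketch.
    scaled = 2.0 * (np.clip(action, q01, q99) - q01) / (q99 - q01 + eps) - 1.0
    return np.where(mask, scaled, action)

# Illustrative usage (file name and dataset key are assumptions, not taken from this commit):
with open("dataset_statistics.json") as f:
    action_stats = json.load(f)["berkeley_cable_routing/0.1.0"]["action"]
print(normalize_with_q_bounds(np.zeros(7), action_stats))
```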
example.png ADDED
generation_config.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 2,
4
+ "cache_implementation": "hybrid",
5
+ "eos_token_id": 1,
6
+ "pad_token_id": 0,
7
+ "transformers_version": "4.47.0"
8
+ }
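The generation_config.json above fixes the special-token ids (bos=2, eos=1, pad=0) and selects the hybrid KV-cache implementation. A minimal sketch of how transformers picks it up; the repo/path argument is a placeholder, not taken from this commit:

```python
from transformers import GenerationConfig

# Loads the generation_config.json shown above; the repo id / local path is illustrative.
gen_cfg = GenerationConfig.from_pretrained("owner/repo-id")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 2 1 0
print(gen_cfg.cache_implementation)                                      # "hybrid"
```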
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca1014c81284474e05df34c9114892635ac6402a252867d424b2a8335e7276a7
3
+ size 4969426016
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07090d7f3cfb91a75c08fc0513bd05994154c347043ac0894ffb899744b57281
3
+ size 3086476734
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
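The two *.safetensors entries above are Git LFS pointers (sha256 oid plus byte size), and model.safetensors.index.json maps each weight name to its shard so that transformers can resolve the sharded checkpoint automatically. A minimal loading sketch under those assumptions; the repo id, dtype, and the use of AutoModel with trust_remote_code are illustrative, not confirmed by this commit:

```python
import torch
from transformers import AutoModel

# trust_remote_code is needed because the architecture lives in the custom modeling_*.py files below.
model = AutoModel.from_pretrained(
    "owner/repo-id",              # illustrative repo id
    torch_dtype=torch.bfloat16,   # illustrative dtype
    trust_remote_code=True,
)
```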
 
modeling_ego3d.py ADDED
@@ -0,0 +1,126 @@
1
+ # MIT License
2
+ # Copyright (c) 2025 IPEC at Shanghai AI Laboratory
3
+ # Permission is hereby granted, free of charge, to use, copy, modify, merge, publish,
4
+ # distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
5
+ # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
7
+ # coding=utf-8
8
+
9
+ """Modified Flash version of zoe model for fast training."""
10
+
11
+ import torch.utils.checkpoint
12
+ from torch import nn
13
+ from transformers.utils import logging
14
+ import torchvision.transforms.functional as F
15
+ import numpy as np
16
+ import math
17
+
18
+ logger = logging.get_logger(__name__)
19
+
20
+
21
+ class Ego3DPositionEmbeddingMLP(nn.Module):
22
+ """Absolute pos embedding, learned.
23
+ https://github.com/kwea123/nerf_pl/blob/52aeb387da64a9ad9a0f914ea9b049ffc598b20c/models/nerf.py#L4
24
+ """
25
+
26
+ def __init__(self, in_channels=3, num_pos_feats=768, n_freqs=8, logscale=True):
27
+ super(Ego3DPositionEmbeddingMLP, self).__init__()
28
+ self.n_freqs = n_freqs
29
+ self.freq_out_channels = in_channels * (2 * n_freqs + 1)
30
+ if logscale:
31
+ freq_bands = 2 ** torch.linspace(0, n_freqs - 1, n_freqs)
32
+ else:
33
+ freq_bands = torch.linspace(1, 2 ** (n_freqs - 1), n_freqs)
34
+
35
+ center = torch.tensor([0., 0., 2.]).repeat(in_channels // 3)
36
+ self.register_buffer("freq_bands", freq_bands, persistent=False)
37
+ self.register_buffer("center", center, persistent=False)
38
+
39
+ self.position_embedding_head = nn.Sequential(
40
+ nn.Linear(self.freq_out_channels, num_pos_feats),
41
+ nn.LayerNorm(num_pos_feats),
42
+ nn.ReLU(),
43
+ nn.Linear(num_pos_feats, num_pos_feats),
44
+ )
45
+ self._reset_parameters()
46
+
47
+ def _reset_parameters(self):
48
+ """init with small weights to maintain stable training."""
49
+ for p in self.parameters():
50
+ if p.dim() > 1:
51
+ nn.init.xavier_uniform_(p, gain=0.01)
52
+
53
+ @torch.no_grad()
54
+ def frequency_encoding(self, xyz):
55
+ """
56
+ Embeds x to (x, sin(2^k x), cos(2^k x), ...)
57
+ Different from the paper, "x" is also in the output
58
+ See https://github.com/bmild/nerf/issues/12
59
+ x \in [-2, 2]
60
+ y \in [-2, 2]
61
+ z \in [0., 4]
62
+ Inputs:
63
+ x: (b n m)
64
+ Outputs:
65
+ out: (b n o)
66
+ """
67
+ xyz_n = ((xyz - self.center) / 2.0).to(self.freq_bands.dtype)
68
+ xyz_feq = xyz_n.unsqueeze(-1) * self.freq_bands # (b n m nf)
69
+ sin_xyz, cos_xyz = torch.sin(xyz_feq), torch.cos(xyz_feq) # (b n m nf)
70
+ encoding = torch.cat([xyz_n.unsqueeze(-1), sin_xyz, cos_xyz], -1).reshape(*xyz.shape[:2], -1)
71
+ return encoding
72
+
73
+ def forward(self, xyz):
74
+ """Forward pass, xyz is (B, N, 3or6), output (B, N, F)."""
75
+ # TODO: encoding with 3D position
76
+ freq_encoding = self.frequency_encoding(xyz)
77
+ position_embedding = self.position_embedding_head(freq_encoding)
78
+ return position_embedding
79
+
80
+
81
+ def get_resize_output_image_size(
82
+ input_height: int,
83
+ input_width: int,
84
+ output_size: tuple = (384, 512),
85
+ keep_aspect_ratio: bool = True,
86
+ multiple: int = 32,
87
+ ):
88
+ def constrain_to_multiple_of(val, multiple, min_val=0):
89
+ x = (np.round(val / multiple) * multiple).astype(int)
90
+ if x < min_val:
91
+ x = math.ceil(val / multiple) * multiple
92
+ return x
93
+
94
+ output_height, output_width = output_size
95
+ scale_height = output_height / input_height
96
+ scale_width = output_width / input_width
97
+
98
+ if keep_aspect_ratio:
99
+ # scale as little as possible
100
+ if abs(1 - scale_width) < abs(1 - scale_height):
101
+ scale_height = scale_width
102
+ else:
103
+ scale_width = scale_height
104
+
105
+ new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple)
106
+ new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple)
107
+
108
+ return (int(new_height), int(new_width))
109
+
110
+
111
+ def process_zoe(pixel_values, pad_mode="reflect", output_size=(384, 512)):
112
+ """https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/models/zoedepth/image_processing_zoedepth.py"""
113
+ # h, w = images.shape[-2:]
114
+ # pad images
115
+ ph, pw = 31, 31 # int((h / 2)**0.5 * 3), int((w / 2)**0.5 * 3) # 32, 31
116
+ images = torch.nn.functional.pad(pixel_values, (pw, pw, ph, ph), mode=pad_mode)
117
+
118
+ # resize images
119
+ size = (384, 384) # get_resize_output_image_size(h, w, output_size=output_size, keep_aspect_ratio=True, multiple=32) # 384, 384
120
+ images = torch.nn.functional.interpolate(images, size=size, mode="bicubic", align_corners=True)
121
+
122
+ # NOTE: zoe: padding -> resize -> normalize.
124
+ # BUT: the siglip processor already outputs normalized images, so we simply follow `normalize -> padding -> resize` in reflect pad mode
124
+ ZOE_MEAN, ZOE_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
125
+ images = F.normalize(images, mean=ZOE_MEAN, std=ZOE_STD)
126
+ return images, ph, pw
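A short usage sketch for the file above, showing the shapes produced by Ego3DPositionEmbeddingMLP and process_zoe; the batch/token sizes are arbitrary dummies and the import assumes modeling_ego3d.py is on the Python path:

```python
import torch
from modeling_ego3d import Ego3DPositionEmbeddingMLP, process_zoe

# The frequency encoding expands 3 channels to 3 * (2 * n_freqs + 1) = 51 features,
# which the MLP head then maps to num_pos_feats.
pos_mlp = Ego3DPositionEmbeddingMLP(in_channels=3, num_pos_feats=768, n_freqs=8)
xyz = torch.rand(2, 196, 3)                  # (B, N, 3) points, dummy values
print(pos_mlp(xyz).shape)                    # torch.Size([2, 196, 768])

# process_zoe reflect-pads by (31, 31), resizes to 384x384, and re-normalizes.
pixel_values = torch.rand(2, 3, 224, 224)    # assumed to be already SigLIP-normalized
zoe_pixels, ph, pw = process_zoe(pixel_values)
print(zoe_pixels.shape, ph, pw)              # torch.Size([2, 3, 384, 384]) 31 31
```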
modeling_gemma2.py ADDED
@@ -0,0 +1,1285 @@
1
+ # custom gemma2 to support flash_attention_2
2
+ # coding=utf-8
3
+ # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ from typing import List, Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+
22
+ from transformers.activations import ACT2FN
23
+ from transformers.cache_utils import Cache, HybridCache
24
+ from transformers.generation import GenerationMixin
25
+ from transformers.modeling_outputs import (
26
+ BaseModelOutputWithPast,
27
+ CausalLMOutputWithPast,
28
+ SequenceClassifierOutputWithPast,
29
+ TokenClassifierOutput,
30
+ )
31
+ from transformers.modeling_utils import PreTrainedModel
32
+ from transformers.utils import (
33
+ add_code_sample_docstrings,
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ is_flash_attn_2_available,
37
+ is_flash_attn_greater_or_equal,
38
+ is_torch_greater_or_equal,
39
+ logging,
40
+ replace_return_docstrings,
41
+ is_flash_attn_greater_or_equal_2_10,
42
+ )
43
+ from transformers import Gemma2Config
44
+
45
+
46
+ if is_flash_attn_2_available():
47
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
48
+
49
+ if is_torch_greater_or_equal("2.5"):
50
+ from torch.nn.attention.flex_attention import flex_attention
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+
55
+ _CHECKPOINT_FOR_DOC = "google/gemma2-7b"
56
+ _CONFIG_FOR_DOC = "Gemma2Config"
57
+
58
+
59
+ class Gemma2RMSNorm(nn.Module):
60
+ def __init__(self, dim: int, eps: float = 1e-6):
61
+ super().__init__()
62
+ self.eps = eps
63
+ self.weight = nn.Parameter(torch.zeros(dim))
64
+
65
+ def _norm(self, x):
66
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
67
+
68
+ def forward(self, x):
69
+ output = self._norm(x.float())
70
+ # Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16)
71
+ # See https://github.com/huggingface/transformers/pull/29402
72
+ output = output * (1.0 + self.weight.float())
73
+ return output.type_as(x)
74
+
75
+ def extra_repr(self):
76
+ return f"{tuple(self.weight.shape)}, eps={self.eps}"
77
+
78
+
79
+ class Gemma2MLP(nn.Module):
80
+ def __init__(self, config):
81
+ super().__init__()
82
+ self.config = config
83
+ self.hidden_size = config.hidden_size
84
+ self.intermediate_size = config.intermediate_size
85
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
86
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
87
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
88
+ self.act_fn = ACT2FN[config.hidden_activation]
89
+
90
+ def forward(self, x):
91
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
92
+
93
+
94
+ class Gemma2RotaryEmbedding(nn.Module):
95
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
96
+ super().__init__()
97
+
98
+ self.dim = dim
99
+ self.max_position_embeddings = max_position_embeddings
100
+ self.base = base
101
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim))
102
+ self.register_buffer("inv_freq", tensor=inv_freq, persistent=False)
103
+
104
+ @torch.no_grad()
105
+ def forward(self, x, position_ids, seq_len=None):
106
+ # x: [bs, num_attention_heads, seq_len, head_size]
107
+ self.inv_freq.to(x.device)
108
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
109
+ position_ids_expanded = position_ids[:, None, :].float()
110
+ # Force float32 since bfloat16 loses precision on long contexts
111
+ # See https://github.com/huggingface/transformers/pull/29285
112
+ device_type = x.device.type
113
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
114
+ with torch.autocast(device_type=device_type, enabled=False):
115
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
116
+ emb = torch.cat((freqs, freqs), dim=-1)
117
+ cos = emb.cos()
118
+ sin = emb.sin()
119
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
120
+
121
+
122
+ def rotate_half(x):
123
+ """Rotates half the hidden dims of the input."""
124
+ x1 = x[..., : x.shape[-1] // 2]
125
+ x2 = x[..., x.shape[-1] // 2 :]
126
+ return torch.cat((-x2, x1), dim=-1)
127
+
128
+
129
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
130
+ """Applies Rotary Position Embedding to the query and key tensors.
131
+
132
+ Args:
133
+ q (`torch.Tensor`): The query tensor.
134
+ k (`torch.Tensor`): The key tensor.
135
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
136
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
137
+ position_ids (`torch.Tensor`, *optional*):
138
+ Deprecated and unused.
139
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
140
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
141
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
142
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
143
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
144
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
145
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
146
+ Returns:
147
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
148
+ """
149
+ cos = cos.unsqueeze(unsqueeze_dim)
150
+ sin = sin.unsqueeze(unsqueeze_dim)
151
+ q_embed = (q * cos) + (rotate_half(q) * sin)
152
+ k_embed = (k * cos) + (rotate_half(k) * sin)
153
+ return q_embed, k_embed
154
+
155
+
156
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
157
+ """
158
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
159
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
160
+ """
161
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
162
+ if n_rep == 1:
163
+ return hidden_states
164
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
165
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
166
+
167
+
168
+ def eager_attention_forward(
169
+ config: Gemma2Config,
170
+ query: torch.Tensor,
171
+ key: torch.Tensor,
172
+ value: torch.Tensor,
173
+ mask: Optional[torch.Tensor],
174
+ **_kwargs,
175
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
176
+ key_states = repeat_kv(key, config.num_key_value_groups)
177
+ value_states = repeat_kv(value, config.num_key_value_groups)
178
+
179
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * config.scaling
180
+
181
+ if config.attn_logit_softcapping is not None:
182
+ attn_weights = attn_weights / config.attn_logit_softcapping
183
+ attn_weights = torch.tanh(attn_weights)
184
+ attn_weights = attn_weights * config.attn_logit_softcapping
185
+ if mask is not None: # no matter the length, we just slice it
186
+ causal_mask = mask[:, :, :, : key_states.shape[-2]]
187
+ attn_weights = attn_weights + causal_mask
188
+
189
+ # upcast attention to fp32
190
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
191
+ attn_weights = nn.functional.dropout(attn_weights, p=config.attention_dropout, training=config.training)
192
+ attn_output = torch.matmul(attn_weights, value_states)
193
+ attn_output = attn_output.transpose(1, 2).contiguous()
194
+ return attn_output, attn_weights
195
+
196
+
197
+ def flash_attention_forward(
198
+ config: Gemma2Config,
199
+ query: torch.Tensor,
200
+ key: torch.Tensor,
201
+ value: torch.Tensor,
202
+ mask: Optional[torch.Tensor],
203
+ target_dtype: torch.dtype = torch.float16,
204
+ **_kwargs,
205
+ ) -> Tuple[torch.Tensor, None]:
206
+ # NOTE: a None mask causes undefined behavior, see https://github.com/huggingface/transformers/blob/c8c8dffbe45ebef0a8dba4a51024e5e5e498596b/src/transformers/models/gemma2/modeling_gemma2.py#L211
207
+ seq_len = query.shape[2]
208
+ # print(f"🔥 query {query.shape}, key {key.shape}, value: {value.shape}")
209
+ if mask is not None:
210
+ # print(f"🔥 mask {mask.shape}")
211
+ # seq_len = mask.shape[1]
212
+ query = query[:, :, :seq_len]
213
+ value = value[:, :, :seq_len]
214
+
215
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout
216
+ # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor rotary embedding
217
+ query_states = query.transpose(1, 2)
218
+ key_states = key.transpose(1, 2)
219
+ value_states = value.transpose(1, 2)
220
+
221
+ dropout_rate = config.attention_dropout if config.training else 0.0
222
+
223
+ input_dtype = query_states.dtype
224
+ if input_dtype == torch.float32:
225
+ query_states = query_states.to(target_dtype)
226
+ key_states = key_states.to(target_dtype)
227
+ value_states = value_states.to(target_dtype)
228
+
229
+ attn_output = _flash_attention_forward(
230
+ query_states,
231
+ key_states,
232
+ value_states,
233
+ mask,
234
+ seq_len,
235
+ dropout=dropout_rate,
236
+ softmax_scale=config.scaling,
237
+ is_causal=config.is_causal,
238
+ sliding_window=config.sliding_window,
239
+ use_top_left_mask=config._flash_attn_uses_top_left_mask,
240
+ softcap=config.attn_logit_softcapping if is_flash_attn_greater_or_equal("2.6.0") else None,
241
+ )
242
+
243
+ return attn_output, None
244
+
245
+
246
+ def flex_attention_forward(
247
+ config: Gemma2Config,
248
+ query: torch.Tensor,
249
+ key: torch.Tensor,
250
+ value: torch.Tensor,
251
+ mask: Optional[torch.Tensor],
252
+ output_attentions: bool = False,
253
+ **_kwargs,
254
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
255
+ def tanh_softcap(score, b, h, q_idx, kv_idx):
256
+ soft_cap = config.attn_logit_softcapping
257
+ score = soft_cap * torch.tanh(score / soft_cap)
258
+ if mask is not None:
259
+ return score + mask[b][0][q_idx][kv_idx]
260
+ return score
261
+
262
+ attn_output = flex_attention(
263
+ query,
264
+ key,
265
+ value,
266
+ score_mod=tanh_softcap,
267
+ enable_gqa=True,
268
+ scale=config.scaling,
269
+ return_lse=output_attentions,
270
+ )
271
+ if not output_attentions:
272
+ attn_weights = None
273
+ else:
274
+ attn_output, attn_weights = attn_output
275
+
276
+ attn_output = attn_output.transpose(1, 2).contiguous()
277
+ return attn_output, attn_weights
278
+
279
+
280
+ def sdpa_attention_forward(
281
+ config: Gemma2Config,
282
+ query: torch.Tensor,
283
+ key: torch.Tensor,
284
+ value: torch.Tensor,
285
+ mask: Optional[torch.Tensor],
286
+ **_kwargs,
287
+ ) -> Tuple[torch.Tensor, None]:
288
+ key = repeat_kv(key, config.num_key_value_groups)
289
+ value = repeat_kv(value, config.num_key_value_groups)
290
+
291
+ causal_mask = mask
292
+ if mask is not None:
293
+ causal_mask = causal_mask[:, :, :, : key.shape[-2]]
294
+
295
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
296
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
297
+ if query.device.type == "cuda" and causal_mask is not None:
298
+ query = query.contiguous()
299
+ key = key.contiguous()
300
+ value = value.contiguous()
301
+
302
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
303
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
304
+ is_causal = True if causal_mask is None and query.shape[1] > 1 else False
305
+
306
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
307
+ query,
308
+ key,
309
+ value,
310
+ attn_mask=causal_mask,
311
+ dropout_p=config.attention_dropout if config.training else 0.0,
312
+ is_causal=is_causal,
313
+ scale=config.scaling,
314
+ )
315
+ attn_output = attn_output.transpose(1, 2).contiguous()
316
+ return attn_output, None
317
+
318
+
319
+ GEMMA2_ATTENTION_FUNCTION = {
320
+ "flash_attention_2": flash_attention_forward,
321
+ "flex_attention": flex_attention_forward,
322
+ "eager": eager_attention_forward,
323
+ "sdpa": sdpa_attention_forward,
324
+ }
325
+
326
+
327
+ class Gemma2Attention(nn.Module):
328
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
329
+
330
+ def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
331
+ super().__init__()
332
+ self.config = config
333
+ self.layer_idx = layer_idx
334
+
335
+ self.attention_dropout = config.attention_dropout
336
+ self.hidden_size = config.hidden_size
337
+ self.num_heads = config.num_attention_heads
338
+ self.head_dim = config.head_dim
339
+ self.num_key_value_heads = config.num_key_value_heads
340
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
341
+ self.max_position_embeddings = config.max_position_embeddings
342
+ self.rope_theta = config.rope_theta
343
+ self.is_causal = True
344
+ self.scaling = config.query_pre_attn_scalar**-0.5
345
+ self.sliding_window = config.sliding_window if not bool(layer_idx % 2) else None
346
+ self.attn_logit_softcapping = config.attn_logit_softcapping
347
+ if self.hidden_size % self.num_heads != 0:
348
+ raise ValueError(
349
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
350
+ f" and `num_heads`: {self.num_heads})."
351
+ )
352
+
353
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
354
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
355
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
356
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
357
+ self.rotary_emb = Gemma2RotaryEmbedding(
358
+ self.head_dim,
359
+ max_position_embeddings=self.max_position_embeddings,
360
+ base=self.rope_theta,
361
+ )
362
+
363
+ # NOTE: gemma2 does not include _flash_attn_uses_top_left_mask for flash attention
364
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
365
+
366
+ def forward(
367
+ self,
368
+ hidden_states: torch.Tensor,
369
+ attention_mask: Optional[torch.Tensor] = None,
370
+ position_ids: Optional[torch.LongTensor] = None,
371
+ past_key_value: Optional[Cache] = None,
372
+ output_attentions: bool = False,
373
+ use_cache: bool = False,
374
+ cache_position: Optional[torch.LongTensor] = None,
375
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
376
+ bsz, q_len, _ = hidden_states.size()
377
+
378
+ query_states = self.q_proj(hidden_states)
379
+ key_states = self.k_proj(hidden_states)
380
+ value_states = self.v_proj(hidden_states)
381
+
382
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
383
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
384
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
385
+
386
+ cos, sin = self.rotary_emb(value_states, position_ids)
387
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
388
+
389
+ if past_key_value is not None:
390
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
391
+ cache_kwargs = {
392
+ "sin": sin,
393
+ "cos": cos,
394
+ "sliding_window": self.sliding_window,
395
+ "cache_position": cache_position,
396
+ }
397
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
398
+
399
+ if output_attentions and self.config._attn_implementation in ["sdpa", "flash_attention_2"]:
400
+ logger.warning_once("Setting `attention_type` to `flex_attention` because `output_attentions=True`")
401
+ attention_type = "flex_attention"
402
+ else:
403
+ attention_type = self.config._attn_implementation
404
+
405
+ attn_output, attn_weights = GEMMA2_ATTENTION_FUNCTION[attention_type](
406
+ self, query_states, key_states, value_states, attention_mask, output_attentions=output_attentions
407
+ )
408
+
409
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
410
+ attn_output = self.o_proj(attn_output)
411
+
412
+ if not output_attentions:
413
+ attn_weights = None
414
+
415
+ return attn_output, attn_weights, past_key_value
416
+
417
+
418
+ class Gemma2FlashAttention2(Gemma2Attention):
419
+ def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
420
+ super().__init__(config, layer_idx)
421
+ self.config._attn_implementation = "flash_attention_2"
422
+ logger.warning_once(
423
+ "The `Gemma2FlashAttention2` class is deprecated in favor of simply modifying the `config._attn_implementation` "
424
+ "attribute of the `Gemma2Attention` class! It will be removed in v4.48"
425
+ )
426
+
427
+
428
+ class Gemma2SdpaAttention(Gemma2Attention):
429
+ def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
430
+ super().__init__(config, layer_idx)
431
+ self.config._attn_implementation = "sdpa"
432
+ logger.warning_once(
433
+ "The `Gemma2SdpaAttention` class is deprecated in favor of simply modifying the `config._attn_implementation` "
434
+ "attribute of the `Gemma2Attention` class! It will be removed in v4.48"
435
+ )
436
+
437
+
438
+ class Gemma2DecoderLayer(nn.Module):
439
+ def __init__(self, config: Gemma2Config, layer_idx: int):
440
+ super().__init__()
441
+ self.hidden_size = config.hidden_size
442
+ self.config = config
443
+ self.is_sliding = not bool(layer_idx % 2)
444
+ self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
445
+ self.mlp = Gemma2MLP(config)
446
+ self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
447
+ self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
448
+
449
+ self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
450
+ self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
451
+ self.sliding_window = config.sliding_window
452
+
453
+ def forward(
454
+ self,
455
+ hidden_states: torch.Tensor,
456
+ attention_mask: Optional[torch.Tensor] = None,
457
+ position_ids: Optional[torch.LongTensor] = None,
458
+ past_key_value: Optional[Cache] = None,
459
+ output_attentions: Optional[bool] = False,
460
+ use_cache: Optional[bool] = False,
461
+ cache_position: Optional[torch.LongTensor] = None,
462
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
463
+ if self.is_sliding and attention_mask is not None: # efficient SDPA and no padding
464
+ # With flash_attention_2 the attention mask is a 2D tensor
465
+ if self.config._attn_implementation == "flash_attention_2":
466
+ if past_key_value is not None: # when decoding
467
+ attention_mask = attention_mask[:, -self.sliding_window :]
468
+ else:
469
+ min_dtype = torch.finfo(hidden_states.dtype).min
470
+ sliding_window_mask = torch.tril(
471
+ torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
472
+ )
473
+ attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
474
+ if attention_mask.shape[-1] <= 1: # when decoding
475
+ attention_mask = attention_mask[:, :, :, -self.sliding_window :]
476
+
477
+ residual = hidden_states
478
+
479
+ hidden_states = self.input_layernorm(hidden_states)
480
+
481
+ # Self Attention
482
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
483
+ hidden_states=hidden_states,
484
+ attention_mask=attention_mask,
485
+ position_ids=position_ids,
486
+ past_key_value=past_key_value,
487
+ output_attentions=output_attentions,
488
+ use_cache=use_cache,
489
+ cache_position=cache_position,
490
+ )
491
+ hidden_states = self.post_attention_layernorm(hidden_states)
492
+ hidden_states = residual + hidden_states
493
+
494
+ residual = hidden_states
495
+ hidden_states = self.pre_feedforward_layernorm(hidden_states)
496
+ hidden_states = self.mlp(hidden_states)
497
+ hidden_states = self.post_feedforward_layernorm(hidden_states)
498
+ hidden_states = residual + hidden_states
499
+
500
+ outputs = (hidden_states,)
501
+
502
+ if output_attentions:
503
+ outputs += (self_attn_weights,)
504
+
505
+ if use_cache:
506
+ outputs += (present_key_value,)
507
+
508
+ return outputs
509
+
510
+
511
+ GEMMA2_START_DOCSTRING = r"""
512
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
513
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
514
+ etc.)
515
+
516
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
517
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
518
+ and behavior.
519
+
520
+ Parameters:
521
+ config ([`Gemma2Config`]):
522
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
523
+ load the weights associated with the model, only the configuration. Check out the
524
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
525
+ """
526
+
527
+
528
+ @add_start_docstrings(
529
+ "The bare Gemma2 Model outputting raw hidden-states without any specific head on top.",
530
+ GEMMA2_START_DOCSTRING,
531
+ )
532
+ class Gemma2PreTrainedModel(PreTrainedModel):
533
+ config_class = Gemma2Config
534
+ base_model_prefix = "model"
535
+ supports_gradient_checkpointing = True
536
+ _no_split_modules = ["Gemma2DecoderLayer"]
537
+ _skip_keys_device_placement = ["past_key_values"]
538
+ _supports_flash_attn_2 = True
539
+ _supports_sdpa = True
540
+ _supports_cache_class = True
541
+ _supports_quantized_cache = False
542
+ _supports_static_cache = True
543
+
544
+ def _init_weights(self, module):
545
+ std = self.config.initializer_range
546
+ if isinstance(module, nn.Linear):
547
+ module.weight.data.normal_(mean=0.0, std=std)
548
+ if module.bias is not None:
549
+ module.bias.data.zero_()
550
+ elif isinstance(module, nn.Embedding):
551
+ module.weight.data.normal_(mean=0.0, std=std)
552
+ if module.padding_idx is not None:
553
+ module.weight.data[module.padding_idx].zero_()
554
+
555
+ @classmethod
556
+ def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
557
+ """
558
+ Overloads `PreTrainedModel._check_and_enable_sdpa` so as to DISABLE torch SDPA by default on Gemma2 models.
559
+ SDPA reduces the model performance on Gemma2 because of the logits softcapping.
560
+ """
561
+ config = super()._check_and_enable_sdpa(config, hard_check_only=hard_check_only)
562
+
563
+ # if using the default path -> swap sdpa by eager
564
+ if not hard_check_only and config._attn_implementation == "sdpa":
565
+ config._attn_implementation = "eager"
566
+
567
+ return config
568
+
569
+
570
+ GEMMA2_INPUTS_DOCSTRING = r"""
571
+ Args:
572
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
573
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
574
+ it.
575
+
576
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
577
+ [`PreTrainedTokenizer.__call__`] for details.
578
+
579
+ [What are input IDs?](../glossary#input-ids)
580
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
581
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
582
+
583
+ - 1 for tokens that are **not masked**,
584
+ - 0 for tokens that are **masked**.
585
+
586
+ [What are attention masks?](../glossary#attention-mask)
587
+
588
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
589
+ [`PreTrainedTokenizer.__call__`] for details.
590
+
591
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
592
+ `past_key_values`).
593
+
594
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
595
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
596
+ information on the default strategy.
597
+
598
+ - 1 indicates the head is **not masked**,
599
+ - 0 indicates the head is **masked**.
600
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
601
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
602
+ config.n_positions - 1]`.
603
+
604
+ [What are position IDs?](../glossary#position-ids)
605
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
606
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
607
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
608
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
609
+
610
+ Two formats are allowed:
611
+ - a [`~cache_utils.Cache`] instance, see our
612
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
613
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
614
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
615
+ cache format.
616
+
617
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
618
+ legacy cache format will be returned.
619
+
620
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
621
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
622
+ of shape `(batch_size, sequence_length)`.
623
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
624
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
625
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
626
+ model's internal embedding lookup matrix.
627
+ use_cache (`bool`, *optional*):
628
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
629
+ `past_key_values`).
630
+ output_attentions (`bool`, *optional*):
631
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
632
+ tensors for more detail.
633
+ output_hidden_states (`bool`, *optional*):
634
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
635
+ more detail.
636
+ return_dict (`bool`, *optional*):
637
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
638
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
639
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
640
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
641
+ the complete sequence length.
642
+ """
643
+
644
+
645
+ @add_start_docstrings(
646
+ "The bare Gemma2 Model outputting raw hidden-states without any specific head on top.",
647
+ GEMMA2_START_DOCSTRING,
648
+ )
649
+ class Gemma2Model(Gemma2PreTrainedModel):
650
+ """
651
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Gemma2DecoderLayer`]
652
+
653
+ Args:
654
+ config: Gemma2Config
655
+ """
656
+
657
+ def __init__(self, config: Gemma2Config):
658
+ super().__init__(config)
659
+ self.padding_idx = config.pad_token_id
660
+ self.vocab_size = config.vocab_size
661
+
662
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
663
+ self.layers = nn.ModuleList(
664
+ [Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
665
+ )
666
+ self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
667
+
668
+ self.gradient_checkpointing = False
669
+ if getattr(config, "pretraining_tp", 1) != 1:
670
+ logger.warning_once("`pretraining_tp` is deprecated, please use `model.tensor_parallel` instead.")
671
+
672
+ # Initialize weights and apply final processing
673
+ self.post_init()
674
+
675
+ def get_input_embeddings(self):
676
+ return self.embed_tokens
677
+
678
+ def set_input_embeddings(self, value):
679
+ self.embed_tokens = value
680
+
681
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
682
+ def forward(
683
+ self,
684
+ input_ids: torch.LongTensor = None,
685
+ attention_mask: Optional[torch.Tensor] = None,
686
+ position_ids: Optional[torch.LongTensor] = None,
687
+ past_key_values: Optional[HybridCache] = None,
688
+ inputs_embeds: Optional[torch.FloatTensor] = None,
689
+ use_cache: Optional[bool] = None,
690
+ output_attentions: Optional[bool] = None,
691
+ output_hidden_states: Optional[bool] = None,
692
+ return_dict: Optional[bool] = None,
693
+ cache_position: Optional[torch.LongTensor] = None,
694
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
695
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
696
+ output_hidden_states = (
697
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
698
+ )
699
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
700
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
701
+
702
+ if (input_ids is None) ^ (inputs_embeds is not None):
703
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
704
+
705
+ if self.gradient_checkpointing and self.training and use_cache:
706
+ logger.warning_once(
707
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
708
+ )
709
+ use_cache = False
710
+
711
+ if inputs_embeds is None:
712
+ inputs_embeds = self.embed_tokens(input_ids)
713
+
714
+ if use_cache and past_key_values is None and not self.training:
715
+ batch_size, seq_len, _ = inputs_embeds.shape
716
+ past_key_values = HybridCache(
717
+ self.config,
718
+ batch_size=batch_size,
719
+ max_cache_len=seq_len,
720
+ device=self.device,
721
+ dtype=inputs_embeds.dtype,
722
+ )
723
+
724
+ if cache_position is None:
725
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
726
+ cache_position = torch.arange(
727
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
728
+ )
729
+
730
+ if position_ids is None:
731
+ position_ids = cache_position.unsqueeze(0)
732
+
733
+ causal_mask = self._update_causal_mask(
734
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
735
+ )
736
+
737
+ # embed positions
738
+ hidden_states = inputs_embeds
739
+
740
+ # normalized
741
+ # Gemma2 downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
742
+ # See https://github.com/huggingface/transformers/pull/29402
743
+ normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
744
+ hidden_states = hidden_states * normalizer
745
+
746
+ # decoder layers
747
+ all_hidden_states = () if output_hidden_states else None
748
+ all_self_attns = () if output_attentions else None
749
+
750
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
751
+ if output_hidden_states:
752
+ all_hidden_states += (hidden_states,)
753
+
754
+ if self.gradient_checkpointing and self.training:
755
+ layer_outputs = self._gradient_checkpointing_func(
756
+ decoder_layer.__call__,
757
+ hidden_states,
758
+ causal_mask,
759
+ position_ids,
760
+ past_key_values,
761
+ output_attentions,
762
+ use_cache,
763
+ cache_position,
764
+ )
765
+ else:
766
+ layer_outputs = decoder_layer(
767
+ hidden_states,
768
+ attention_mask=causal_mask,
769
+ position_ids=position_ids,
770
+ past_key_value=past_key_values,
771
+ output_attentions=output_attentions,
772
+ use_cache=use_cache,
773
+ cache_position=cache_position,
774
+ )
775
+
776
+ hidden_states = layer_outputs[0]
777
+
778
+ if output_attentions:
779
+ all_self_attns += (layer_outputs[1],)
780
+
781
+ hidden_states = self.norm(hidden_states)
782
+
783
+ if output_hidden_states:
784
+ all_hidden_states += (hidden_states,)
785
+
786
+ next_cache = past_key_values if use_cache else None
787
+
788
+ if not return_dict:
789
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
790
+ return BaseModelOutputWithPast(
791
+ last_hidden_state=hidden_states,
792
+ past_key_values=next_cache,
793
+ hidden_states=all_hidden_states,
794
+ attentions=all_self_attns,
795
+ )
796
+
797
+ @torch.no_grad()
798
+ def _update_causal_mask(
799
+ self,
800
+ attention_mask: torch.Tensor,
801
+ input_tensor: torch.Tensor,
802
+ cache_position: torch.Tensor,
803
+ past_key_values: HybridCache,
804
+ output_attentions: bool,
805
+ ):
806
+ # Flash Attention currently doesn't support static cache, but Gemma2 works only with static cache.
807
+ # So we will pass in the attention mask as is in any case, not only when there's padding. Then we'll use its shape
808
+ # to cut out keys/values trailing 0 used in static cache. This workaround should be compile compatible
809
+ # as it doesn't cause dynamic control issues.
810
+ if self.config._attn_implementation == "flash_attention_2":
811
+ return attention_mask
812
+
813
+ dtype, device = input_tensor.dtype, input_tensor.device
814
+ sequence_length = input_tensor.shape[1]
815
+ if isinstance(past_key_values, HybridCache):
816
+ target_length = past_key_values.get_max_cache_shape()
817
+ else:
818
+ target_length = attention_mask.shape[-1] if attention_mask is not None else input_tensor.shape[1]
819
+
820
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
821
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
822
+ attention_mask,
823
+ sequence_length=sequence_length,
824
+ target_length=target_length,
825
+ dtype=dtype,
826
+ device=device,
827
+ cache_position=cache_position,
828
+ batch_size=input_tensor.shape[0],
829
+ )
830
+ return causal_mask
831
+
832
+ @staticmethod
833
+ def _prepare_4d_causal_attention_mask_with_cache_position(
834
+ attention_mask: torch.Tensor,
835
+ sequence_length: int,
836
+ target_length: int,
837
+ dtype: torch.dtype,
838
+ device: torch.device,
839
+ cache_position: torch.Tensor,
840
+ batch_size: int,
841
+ **kwargs,
842
+ ):
843
+ """
844
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
845
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
846
+
847
+ Args:
848
+ attention_mask (`torch.Tensor`):
849
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
850
+ `(batch_size, 1, query_length, key_value_length)`.
851
+ sequence_length (`int`):
852
+ The sequence length being processed.
853
+ target_length (`int`):
854
+ The target length: when generating with static cache, the mask should be as long as the static cache,
855
+ to account for the 0 padding, the part of the cache that is not filled yet.
856
+ dtype (`torch.dtype`):
857
+ The dtype to use for the 4D attention mask.
858
+ device (`torch.device`):
859
+ The device to place the 4D attention mask on.
860
+ cache_position (`torch.Tensor`):
861
+ Indices depicting the position of the input sequence tokens in the sequence.
862
+ batch_size (`torch.Tensor`):
863
+ Batch size.
864
+ """
865
+ if attention_mask is not None and attention_mask.dim() == 4:
866
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
867
+ causal_mask = attention_mask
868
+ else:
869
+ min_dtype = torch.finfo(dtype).min
870
+ causal_mask = torch.full(
871
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
872
+ )
873
+ if sequence_length != 1:
874
+ causal_mask = torch.triu(causal_mask, diagonal=1)
875
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
876
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
877
+ if attention_mask is not None:
878
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
879
+ mask_length = attention_mask.shape[-1]
880
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
881
+ padding_mask = padding_mask == 0
882
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
883
+ padding_mask, min_dtype
884
+ )
885
+
886
+ return causal_mask
887
+
888
+
889
+ class Gemma2ForCausalLM(Gemma2PreTrainedModel, GenerationMixin):
890
+ _tied_weights_keys = ["lm_head.weight"]
891
+ _tp_plan = {"lm_head": "colwise_rep"}
892
+
893
+ def __init__(self, config):
894
+ super().__init__(config)
895
+ self.model = Gemma2Model(config)
896
+ self.vocab_size = config.vocab_size
897
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
898
+
899
+ # Initialize weights and apply final processing
900
+ self.post_init()
901
+
902
+ def get_input_embeddings(self):
903
+ return self.model.embed_tokens
904
+
905
+ def set_input_embeddings(self, value):
906
+ self.model.embed_tokens = value
907
+
908
+ def get_output_embeddings(self):
909
+ return self.lm_head
910
+
911
+ def set_output_embeddings(self, new_embeddings):
912
+ self.lm_head = new_embeddings
913
+
914
+ def set_decoder(self, decoder):
915
+ self.model = decoder
916
+
917
+ def get_decoder(self):
918
+ return self.model
919
+
920
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
921
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
922
+ def forward(
923
+ self,
924
+ input_ids: torch.LongTensor = None,
925
+ attention_mask: Optional[torch.Tensor] = None,
926
+ position_ids: Optional[torch.LongTensor] = None,
927
+ past_key_values: Optional[HybridCache] = None,
928
+ inputs_embeds: Optional[torch.FloatTensor] = None,
929
+ labels: Optional[torch.LongTensor] = None,
930
+ use_cache: Optional[bool] = None,
931
+ output_attentions: Optional[bool] = None,
932
+ output_hidden_states: Optional[bool] = None,
933
+ return_dict: Optional[bool] = None,
934
+ cache_position: Optional[torch.LongTensor] = None,
935
+ num_logits_to_keep: int = 0,
936
+ **loss_kwargs,
937
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
938
+ r"""
939
+ Args:
940
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
941
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
942
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
943
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
944
+
945
+ num_logits_to_keep (`int`, *optional*):
946
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
947
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
948
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
949
+
950
+ Returns:
951
+
952
+ Example:
953
+
954
+ ```python
955
+ >>> from transformers import AutoTokenizer, Gemma2ForCausalLM
956
+
957
+ >>> model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b")
958
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
959
+
960
+ >>> prompt = "What is your favorite condiment?"
961
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
962
+
963
+ >>> # Generate
964
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
965
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
966
+ "What is your favorite condiment?"
967
+ ```"""
968
+
969
+ if self.training and self.config._attn_implementation != "eager":
970
+ logger.warning_once(
971
+ "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
972
+ f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
973
+ )
974
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
975
+ output_hidden_states = (
976
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
977
+ )
978
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
979
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
980
+ outputs = self.model(
981
+ input_ids=input_ids,
982
+ attention_mask=attention_mask,
983
+ position_ids=position_ids,
984
+ past_key_values=past_key_values,
985
+ inputs_embeds=inputs_embeds,
986
+ use_cache=use_cache,
987
+ output_attentions=output_attentions,
988
+ output_hidden_states=output_hidden_states,
989
+ return_dict=return_dict,
990
+ cache_position=cache_position,
991
+ )
992
+
993
+ hidden_states = outputs[0]
994
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
995
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
996
+ if self.config.final_logit_softcapping is not None:
997
+ logits = logits / self.config.final_logit_softcapping
998
+ logits = torch.tanh(logits)
999
+ logits = logits * self.config.final_logit_softcapping
1000
+
1001
+ loss = None
1002
+ if labels is not None:
1003
+ loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
1004
+
1005
+ if not return_dict:
1006
+ output = (logits,) + outputs[1:]
1007
+ return (loss,) + output if loss is not None else output
1008
+
1009
+ return CausalLMOutputWithPast(
1010
+ loss=loss,
1011
+ logits=logits,
1012
+ past_key_values=outputs.past_key_values,
1013
+ hidden_states=outputs.hidden_states,
1014
+ attentions=outputs.attentions,
1015
+ )
1016
+
1017
+ def prepare_inputs_for_generation(
1018
+ self,
1019
+ input_ids,
1020
+ past_key_values=None,
1021
+ attention_mask=None,
1022
+ inputs_embeds=None,
1023
+ cache_position=None,
1024
+ position_ids=None,
1025
+ use_cache=True,
1026
+ num_logits_to_keep=None,
1027
+ **kwargs,
1028
+ ):
1029
+ # Overwritten: has a special cache type, `HybridCache`
1030
+
1031
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1032
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1033
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1034
+ if past_key_values is not None:
1035
+ if inputs_embeds is not None: # Exception 1
1036
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1037
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1038
+ input_ids = input_ids[:, cache_position]
1039
+ if attention_mask is not None and position_ids is None:
1040
+ # create position_ids on the fly for batch generation
1041
+ position_ids = attention_mask.long().cumsum(-1) - 1
1042
+ position_ids.masked_fill_(attention_mask == 0, 1)
1043
+ if past_key_values:
1044
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1045
+ # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
1046
+ # `mode="reduce-overhead"`, as otherwise the input `position_ids` would have varying strides
1047
+ # during the decoding. Here, simply using `.contiguous()` is not sufficient as in the
1048
+ # batch size = 1 case, `position_ids` is already contiguous but with varying stride
1049
+ # which retriggers a capture.
1050
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1051
+
1052
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1053
+ if inputs_embeds is not None and cache_position[0] == 0:
1054
+ model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
1055
+ else:
1056
+ # The clone here is for the same reason as for `position_ids`.
1057
+ model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
1058
+
1059
+ if (
1060
+ isinstance(past_key_values, HybridCache)
1061
+ and attention_mask.ndim == 2
1062
+ and not self.config._attn_implementation == "flash_attention_2"
1063
+ ):
1064
+ if model_inputs["inputs_embeds"] is not None:
1065
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1066
+ device = model_inputs["inputs_embeds"].device
1067
+ else:
1068
+ batch_size, sequence_length = model_inputs["input_ids"].shape
1069
+ device = model_inputs["input_ids"].device
1070
+
1071
+ attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
1072
+ attention_mask,
1073
+ sequence_length=sequence_length,
1074
+ target_length=past_key_values.get_max_cache_shape(),
1075
+ dtype=self.lm_head.weight.dtype,
1076
+ device=device,
1077
+ cache_position=cache_position,
1078
+ batch_size=batch_size,
1079
+ )
1080
+
1081
+ if num_logits_to_keep is not None:
1082
+ model_inputs["num_logits_to_keep"] = num_logits_to_keep
1083
+
1084
+ model_inputs.update(
1085
+ {
1086
+ "position_ids": position_ids,
1087
+ "cache_position": cache_position,
1088
+ "past_key_values": past_key_values,
1089
+ "use_cache": use_cache,
1090
+ "attention_mask": attention_mask,
1091
+ }
1092
+ )
1093
+ return model_inputs
1094
+
1095
+
1096
+ @add_start_docstrings(
1097
+ """
1098
+ The Gemma2 Model transformer with a sequence classification head on top (linear layer).
1099
+
1100
+ [`Gemma2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1101
+ (e.g. GPT-2) do.
1102
+
1103
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1104
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1105
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1106
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1107
+ each row of the batch).
1108
+ """,
1109
+ GEMMA2_START_DOCSTRING,
1110
+ )
1111
+ class Gemma2ForSequenceClassification(Gemma2PreTrainedModel):
1112
+ def __init__(self, config):
1113
+ super().__init__(config)
1114
+ self.num_labels = config.num_labels
1115
+ self.model = Gemma2Model(config)
1116
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1117
+
1118
+ # Initialize weights and apply final processing
1119
+ self.post_init()
1120
+
1121
+ def get_input_embeddings(self):
1122
+ return self.model.embed_tokens
1123
+
1124
+ def set_input_embeddings(self, value):
1125
+ self.model.embed_tokens = value
1126
+
1127
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
1128
+ def forward(
1129
+ self,
1130
+ input_ids: Optional[torch.LongTensor] = None,
1131
+ attention_mask: Optional[torch.Tensor] = None,
1132
+ position_ids: Optional[torch.LongTensor] = None,
1133
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1134
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1135
+ labels: Optional[torch.LongTensor] = None,
1136
+ use_cache: Optional[bool] = None,
1137
+ output_attentions: Optional[bool] = None,
1138
+ output_hidden_states: Optional[bool] = None,
1139
+ return_dict: Optional[bool] = None,
1140
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1141
+ r"""
1142
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1143
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1144
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1145
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1146
+ """
1147
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1148
+
1149
+ transformer_outputs = self.model(
1150
+ input_ids,
1151
+ attention_mask=attention_mask,
1152
+ position_ids=position_ids,
1153
+ past_key_values=past_key_values,
1154
+ inputs_embeds=inputs_embeds,
1155
+ use_cache=use_cache,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ )
1160
+ hidden_states = transformer_outputs[0]
1161
+ logits = self.score(hidden_states)
1162
+
1163
+ if input_ids is not None:
1164
+ batch_size = input_ids.shape[0]
1165
+ else:
1166
+ batch_size = inputs_embeds.shape[0]
1167
+
1168
+ if self.config.pad_token_id is None and batch_size != 1:
1169
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1170
+ if self.config.pad_token_id is None:
1171
+ sequence_lengths = -1
1172
+ else:
1173
+ if input_ids is not None:
1174
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1175
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1176
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1177
+ sequence_lengths = sequence_lengths.to(logits.device)
1178
+ else:
1179
+ sequence_lengths = -1
1180
+
1181
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1182
+
1183
+ loss = None
1184
+ if labels is not None:
1185
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1186
+
1187
+ if not return_dict:
1188
+ output = (pooled_logits,) + transformer_outputs[1:]
1189
+ return ((loss,) + output) if loss is not None else output
1190
+
1191
+ return SequenceClassifierOutputWithPast(
1192
+ loss=loss,
1193
+ logits=pooled_logits,
1194
+ past_key_values=transformer_outputs.past_key_values,
1195
+ hidden_states=transformer_outputs.hidden_states,
1196
+ attentions=transformer_outputs.attentions,
1197
+ )
1198
+
1199
+
1200
+ @add_start_docstrings(
1201
+ """
1202
+ The Gemma2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1203
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1204
+ """,
1205
+ GEMMA2_START_DOCSTRING,
1206
+ )
1207
+ class Gemma2ForTokenClassification(Gemma2PreTrainedModel):
1208
+ def __init__(self, config):
1209
+ super().__init__(config)
1210
+ self.num_labels = config.num_labels
1211
+ self.model = Gemma2Model(config)
1212
+ if getattr(config, "classifier_dropout", None) is not None:
1213
+ classifier_dropout = config.classifier_dropout
1214
+ elif getattr(config, "hidden_dropout", None) is not None:
1215
+ classifier_dropout = config.hidden_dropout
1216
+ else:
1217
+ classifier_dropout = 0.1
1218
+ self.dropout = nn.Dropout(classifier_dropout)
1219
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1220
+
1221
+ # Initialize weights and apply final processing
1222
+ self.post_init()
1223
+
1224
+ def get_input_embeddings(self):
1225
+ return self.model.embed_tokens
1226
+
1227
+ def set_input_embeddings(self, value):
1228
+ self.model.embed_tokens = value
1229
+
1230
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
1231
+ @add_code_sample_docstrings(
1232
+ checkpoint=_CHECKPOINT_FOR_DOC,
1233
+ output_type=TokenClassifierOutput,
1234
+ config_class=_CONFIG_FOR_DOC,
1235
+ )
1236
+ def forward(
1237
+ self,
1238
+ input_ids: Optional[torch.LongTensor] = None,
1239
+ attention_mask: Optional[torch.Tensor] = None,
1240
+ position_ids: Optional[torch.LongTensor] = None,
1241
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1242
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1243
+ labels: Optional[torch.LongTensor] = None,
1244
+ use_cache: Optional[bool] = None,
1245
+ output_attentions: Optional[bool] = None,
1246
+ output_hidden_states: Optional[bool] = None,
1247
+ return_dict: Optional[bool] = None,
1248
+ ) -> Union[Tuple, TokenClassifierOutput]:
1249
+ r"""
1250
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1251
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1252
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1253
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1254
+ """
1255
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1256
+
1257
+ outputs = self.model(
1258
+ input_ids,
1259
+ attention_mask=attention_mask,
1260
+ position_ids=position_ids,
1261
+ past_key_values=past_key_values,
1262
+ inputs_embeds=inputs_embeds,
1263
+ use_cache=use_cache,
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ )
1268
+ sequence_output = outputs[0]
1269
+ sequence_output = self.dropout(sequence_output)
1270
+ logits = self.score(sequence_output)
1271
+
1272
+ loss = None
1273
+ if labels is not None:
1274
+ loss = self.loss_function(logits, labels, self.config)
1275
+
1276
+ if not return_dict:
1277
+ output = (logits,) + outputs[2:]
1278
+ return ((loss,) + output) if loss is not None else output
1279
+
1280
+ return TokenClassifierOutput(
1281
+ loss=loss,
1282
+ logits=logits,
1283
+ hidden_states=outputs.hidden_states,
1284
+ attentions=outputs.attentions,
1285
+ )
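
The custom modeling_gemma2.py added above selects its attention kernel at runtime through the `GEMMA2_ATTENTION_FUNCTION` dispatch table keyed by `config._attn_implementation` (eager, sdpa, flex_attention, or flash_attention_2). A minimal smoke-test sketch follows; it is illustrative only and not part of the committed files: the tiny config values are arbitrary and the local import path is an assumption.

```python
# Illustrative sketch only -- not part of this commit.
# Instantiates the custom Gemma2ForCausalLM defined above with a deliberately tiny,
# hypothetical config and runs one forward pass with the default eager kernel.
import torch
from transformers import Gemma2Config

from modeling_gemma2 import Gemma2ForCausalLM  # module added in this commit (assumed importable)

config = Gemma2Config(
    vocab_size=1000,
    hidden_size=256,
    intermediate_size=512,
    num_hidden_layers=2,
    num_attention_heads=8,
    num_key_value_heads=4,
    head_dim=32,
)
config._attn_implementation = "eager"  # or "sdpa" / "flex_attention" / "flash_attention_2"

model = Gemma2ForCausalLM(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    out = model(input_ids=input_ids, use_cache=False)
print(out.logits.shape)  # torch.Size([1, 8, 1000])
```

When flash-attn 2 is installed and `config._attn_implementation` is set to "flash_attention_2", the dispatch table instead calls `flash_attention_forward`, which hands the 2D attention mask straight to `_flash_attention_forward` rather than building a 4D causal mask.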
modeling_spatialvla.py ADDED
@@ -0,0 +1,773 @@
1
+ # MIT License
2
+ # Copyright (c) 2025 IPEC at Shanghai AI Laboratory
3
+ # Permission is hereby granted, free of charge, to use, copy, modify, merge, publish,
4
+ # distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
5
+ # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
7
+ # Based on code licensed under the Apache License, Version 2.0 by Google Inc. and HuggingFace Inc. team (Copyright 2024).
8
+ # coding=utf-8
9
+
10
+ """PyTorch PaliGemma model."""
11
+
12
+ from dataclasses import dataclass
13
+ from typing import List, Optional, Tuple, Union
14
+
15
+ import torch
16
+ import torch.utils.checkpoint
17
+ from torch import nn
18
+ from torch.linalg import inv
19
+ import torchvision.transforms.functional as F
20
+
21
+ import os
22
+ from transformers.cache_utils import Cache, HybridCache, StaticCache
23
+ from transformers.generation import GenerationMixin
24
+ from transformers.modeling_utils import PreTrainedModel, PretrainedConfig
25
+ from transformers.utils import (
26
+ ModelOutput,
27
+ add_start_docstrings,
28
+ add_start_docstrings_to_model_forward,
29
+ is_flash_attn_2_available,
30
+ logging,
31
+ replace_return_docstrings,
32
+ )
33
+ from .configuration_spatialvla import SpatialVLAConfig
34
+ from .modeling_ego3d import Ego3DPositionEmbeddingMLP, process_zoe
35
+ from .modeling_gemma2 import Gemma2ForCausalLM
36
+
37
+ if is_flash_attn_2_available():
38
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
39
+
40
+ from transformers import AutoModel, AutoModelForCausalLM, ZoeDepthForDepthEstimation
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ _CONFIG_FOR_DOC = "PaliGemmaConfig"
46
+
47
+ # constant
48
+ SIGLIP_MEAN, SIGLIP_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
49
+
50
+ # Adapted from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
51
+ # But Paligemma has no causal mask on prefix
52
+ def _prepare_4d_causal_attention_mask_with_cache_position(
53
+ attention_mask: torch.Tensor,
54
+ sequence_length: int,
55
+ target_length: int,
56
+ dtype: torch.dtype,
57
+ device: torch.device,
58
+ min_dtype: float,
59
+ cache_position: torch.Tensor,
60
+ batch_size: int,
61
+ is_training: bool = False,
62
+ token_type_ids: torch.Tensor = None,
63
+ **kwargs,
64
+ ):
65
+ """
66
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
67
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
68
+
69
+ Args:
70
+ attention_mask (`torch.Tensor`):
71
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
72
+ sequence_length (`int`):
73
+ The sequence length being processed.
74
+ target_length (`int`):
75
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
76
+ dtype (`torch.dtype`):
77
+ The dtype to use for the 4D attention mask.
78
+ device (`torch.device`):
79
+ The device to place the 4D attention mask on.
80
+ min_dtype (`float`):
81
+ The minimum value representable with the dtype `dtype`.
82
+ cache_position (`torch.Tensor`):
83
+ Indices depicting the position of the input sequence tokens in the sequence.
84
+ batch_size (`torch.Tensor`):
85
+ Batch size.
86
+ is_training (`bool`):
87
+ Whether the model is in training mode or in inference. The condition is checked by presence/absence of `token_type_ids/labels`
88
+ """
89
+ if attention_mask is not None and attention_mask.dim() == 4:
90
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
91
+ causal_mask = attention_mask
92
+ else:
93
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
94
+ # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below
95
+ if sequence_length != 1:
96
+ if is_training:
97
+ causal_mask = torch.triu(causal_mask, diagonal=1)
98
+ else:
99
+ causal_mask[:, :sequence_length] = 0.0
100
+
101
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
102
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
103
+ if attention_mask is not None:
104
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
105
+ mask_length = attention_mask.shape[-1]
106
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
107
+ padding_mask = padding_mask == 0
108
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
109
+ padding_mask, min_dtype
110
+ )
111
+ # we are training thus we need to create a full mask on the image + prefix but causal on suffix
112
+ if is_training:
113
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
114
+ token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0
115
+ )
116
+ return causal_mask
117
+
118
+
119
+ @dataclass
120
+ class SpatialVLACausalLMOutputWithPast(ModelOutput):
121
+ """
122
+ Base class for PaliGemma causal language model (or autoregressive) outputs.
123
+
124
+ Args:
125
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
126
+ Language modeling loss (for next-token prediction).
127
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
128
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
129
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
130
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
131
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
132
+
133
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
134
+ `past_key_values` input) to speed up sequential decoding.
135
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
136
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
137
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
138
+
139
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
140
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
141
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
142
+ sequence_length)`.
143
+
144
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
145
+ heads.
146
+ image_hidden_states (`torch.FloatTensor`, *optional*):
147
+ A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
148
+ image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
149
+ """
150
+
151
+ loss: Optional[torch.FloatTensor] = None
152
+ logits: torch.FloatTensor = None
153
+ past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None
154
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
155
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
156
+ image_hidden_states: Optional[torch.FloatTensor] = None
157
+
158
+
159
+ class SpatialVLAMultiModalProjector(nn.Module):
160
+ def __init__(self, config: SpatialVLAConfig):
161
+ super().__init__()
162
+ self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
163
+
164
+ def forward(self, image_features):
165
+ hidden_states = self.linear(image_features)
166
+
167
+ return hidden_states
168
+
169
+
170
+ PALIGEMMA_START_DOCSTRING = r"""
171
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
172
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
173
+ etc.)
174
+
175
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
176
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
177
+ and behavior.
178
+
179
+ Parameters:
180
+ config ([`PaliGemmaConfig`] or [`PaliGemmaVisionConfig`]):
181
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
182
+ load the weights associated with the model, only the configuration. Check out the
183
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
184
+ """
185
+
186
+
187
+ @add_start_docstrings(
188
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
189
+ PALIGEMMA_START_DOCSTRING,
190
+ )
191
+ class SpatialVLAPreTrainedModel(PreTrainedModel):
192
+ config_class = SpatialVLAConfig
193
+ base_model_prefix = "model"
194
+ supports_gradient_checkpointing = True
195
+ _no_split_modules = ["SpatialVLAMultiModalProjector", "ZoeDepthForDepthEstimation", "Ego3DPositionEmbeddingMLP"]
196
+ _skip_keys_device_placement = "past_key_values"
197
+ _supports_cache_class = True
198
+ _supports_quantized_cache = True
199
+ _supports_static_cache = True
200
+ _supports_cache_class = True
201
+ _supports_flash_attn_2 = True
202
+ _supports_sdpa = True
203
+
204
+ def _init_weights(self, module):
205
+ # important: this ported version of PaliGemma isn't meant for training from scratch - only
206
+ # inference and fine-tuning
207
+ std = (
208
+ self.config.initializer_range
209
+ if hasattr(self.config, "initializer_range")
210
+ else self.config.text_config.initializer_range
211
+ )
212
+
213
+ if hasattr(module, "class_embedding"):
214
+ module.class_embedding.data.normal_(mean=0.0, std=std)
215
+
216
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
217
+ module.weight.data.normal_(mean=0.0, std=std)
218
+ if module.bias is not None:
219
+ module.bias.data.zero_()
220
+ elif isinstance(module, nn.Embedding):
221
+ module.weight.data.normal_(mean=0.0, std=std)
222
+ if module.padding_idx is not None:
223
+ module.weight.data[module.padding_idx].zero_()
224
+
225
+
226
+ PALIGEMMA_INPUTS_DOCSTRING = r"""
227
+ Args:
228
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
229
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
230
+ it.
231
+
232
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
233
+ [`PreTrainedTokenizer.__call__`] for details.
234
+
235
+ [What are input IDs?](../glossary#input-ids)
236
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):
237
+ The tensors corresponding to the input images. Pixel values can be obtained using
238
+ [`AutoImageProcessor`]. See [`SiglipImageProcessor.__call__`] for details ([`PaliGemmaProcessor`] uses
239
+ [`SiglipImageProcessor`] for processing images).
240
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
241
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
242
+
243
+ - 1 for tokens that are **not masked**,
244
+ - 0 for tokens that are **masked**.
245
+
246
+ [What are attention masks?](../glossary#attention-mask)
247
+
248
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
249
+ [`PreTrainedTokenizer.__call__`] for details.
250
+
251
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
252
+ `past_key_values`).
253
+
254
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
255
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
256
+ information on the default strategy.
257
+
258
+ - 1 indicates the head is **not masked**,
259
+ - 0 indicates the head is **masked**.
260
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
261
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
262
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
263
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
264
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
265
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
266
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
267
+
268
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
269
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
270
+
271
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
272
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
273
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
274
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
275
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
276
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
277
+ model's internal embedding lookup matrix.
278
+ use_cache (`bool`, *optional*):
279
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
280
+ `past_key_values`).
281
+ output_attentions (`bool`, *optional*):
282
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
283
+ tensors for more detail.
284
+ output_hidden_states (`bool`, *optional*):
285
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
286
+ more detail.
287
+ return_dict (`bool`, *optional*):
288
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
289
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
290
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
291
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
292
+ the complete sequence length.
293
+ """
294
+
295
+
296
+ @add_start_docstrings(
297
+ """The PALIGEMMA model which consists of a vision backbone and a language model.""",
298
+ PALIGEMMA_START_DOCSTRING,
299
+ )
300
+ class SpatialVLAForConditionalGeneration(SpatialVLAPreTrainedModel, GenerationMixin):
301
+ def __init__(self, config: SpatialVLAConfig, vision_model=None, vision_zoe_model=None, projector_model=None, language_model=None):
302
+ super().__init__(config)
303
+ # vision model
304
+ self.vision_tower = vision_model or AutoModel.from_config(config=config.vision_config)
305
+ # projector
306
+ self.multi_modal_projector = projector_model or SpatialVLAMultiModalProjector(config)
307
+ # language model
308
+ self.vocab_size = config.text_config.vocab_size
309
+ if language_model is None:
310
+ language_model = Gemma2ForCausalLM(config=config.text_config) if config.text_config.model_type == "gemma2" else AutoModelForCausalLM.from_config(config=config.text_config)
311
+ # set tied weights keys
312
+ if language_model._tied_weights_keys is not None:
313
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
314
+ self.language_model = language_model
315
+
316
+ if config.use_vision_zoe:
317
+ # zoe model
318
+ self.vision_zoe_model = vision_zoe_model or ZoeDepthForDepthEstimation(config.vision_zoe_config)
319
+ self.position_embedding_3d = Ego3DPositionEmbeddingMLP(
320
+ config.ego3d_patch_reso**2 * 3, num_pos_feats=config.vision_config.hidden_size, n_freqs=config.n_freqs
321
+ )
322
+ # register buffer
323
+ patch_size, reso, image_size = config.vision_config.patch_size, config.ego3d_patch_reso, config.vision_config.image_size
324
+ y, x = torch.meshgrid(torch.arange(0, image_size, patch_size // reso), torch.arange(0, image_size, patch_size // reso), indexing="ij") # (h//sp w//sp)
325
+ y, x = y + patch_size / reso / 2, x + patch_size / reso / 2
326
+ uv_h = torch.stack([x, y, torch.ones_like(x)], dim=0).reshape(3, -1) # (3 hw)
327
+ self.register_buffer("uv_h", uv_h, persistent=False)
328
+
329
+ # NOTE: add shared additional spatial token embeddings for <ACTION> <IMG>
330
+ if config.use_spatial_token:
331
+ self.spatial_embed_tokens = nn.Embedding(self.config.spatial_token_num, config.text_config.hidden_size)
332
+ else:
333
+ self.spatial_embed_tokens = None
334
+
335
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
336
+ # self.post_init() # BUG: cause from_pretrained failed!
337
+ # self.position_embedding_3d._reset_parameters()
338
+
339
+
340
+ def backproject_patch(self, K: torch.Tensor, depth: torch.Tensor, patch_size=14, reso=2) -> torch.Tensor:
341
+ """
342
+ Backproject depth map to 3D points in camera coordinate.
343
+ Args:
344
+ K: camera intrinsic matrix (b 3 3)
345
+ depth: depth map (b 1 h w)
346
+ pixel_offset: offset to the pixel coordinate
347
+ """
348
+ # __import__("ipdb").set_trace()
349
+ b, c, h, w = depth.shape
350
+ hp, wp = h // patch_size, w // patch_size
351
+ sub_hp = sub_wp = reso
352
+ patch_depth = torch.nn.functional.interpolate(depth, size=(hp * reso, wp * reso), mode="area").reshape(b, c, -1)
353
+
354
+ # import torchvision; torchvision.utils.save_image(zoe_pixel_values[0], "zoe_image.png")
355
+ p_cam = (inv(K.float()) @ self.uv_h.float()) * patch_depth # (b 3 3) @ (3 hw) -> (b 3 hw) * (b 1 hw) -> (b 3 hw)
356
+ patch_p_cam = p_cam.reshape(b, 3, hp, sub_hp, wp, sub_wp).permute(0, 2, 4, 3, 5, 1).reshape(b, hp * wp, -1)
357
+ return patch_p_cam
358
+
359
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings with Llava->PaliGemma
360
+ def get_input_embeddings(self):
361
+ return self.language_model.get_input_embeddings()
362
+
363
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings with Llava->PaliGemma
364
+ def set_input_embeddings(self, value):
365
+ self.language_model.set_input_embeddings(value)
366
+
367
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings with Llava->PaliGemma
368
+ def get_output_embeddings(self):
369
+ return self.language_model.get_output_embeddings()
370
+
371
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings with Llava->PaliGemma
372
+ def set_output_embeddings(self, new_embeddings):
373
+ self.language_model.set_output_embeddings(new_embeddings)
374
+
375
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder with Llava->PaliGemma
376
+ def set_decoder(self, decoder):
377
+ self.language_model.set_decoder(decoder)
378
+
379
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder with Llava->PaliGemma
380
+ def get_decoder(self):
381
+ return self.language_model.get_decoder()
382
+
383
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.tie_weights with Llava->PaliGemma
384
+ def tie_weights(self):
385
+ return self.language_model.tie_weights()
386
+
387
+ def resize_token_embeddings(
388
+ self,
389
+ new_num_tokens: Optional[int] = None,
390
+ pad_to_multiple_of: Optional[int] = None,
391
+ mean_resizing: bool = True,
392
+ ) -> nn.Embedding:
393
+ # TODO: is_deepspeed_zero3_enabled gather
394
+ print(f"resize token embeddings from {self.language_model.get_output_embeddings().weight.shape} to (*,{new_num_tokens})")
395
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
396
+
397
+ # update base model and current model config
398
+ vocab_size = model_embeds.weight.shape[0]
399
+ self.config.text_config.vocab_size = self.vocab_size = self.config._vocab_size = vocab_size
400
+ self.tie_weights()
401
+ return model_embeds
402
+
403
+ def _update_causal_mask(
404
+ self,
405
+ attention_mask,
406
+ token_type_ids,
407
+ past_key_values,
408
+ cache_position,
409
+ input_ids=None,
410
+ inputs_embeds=None,
411
+ is_training: bool = False,
412
+ ):
413
+ if self.config.text_config._attn_implementation == "flash_attention_2":
414
+ if attention_mask is not None and 0.0 in attention_mask:
415
+ return attention_mask
416
+ return None
417
+
418
+ using_static_cache = isinstance(past_key_values, StaticCache)
419
+ min_dtype = torch.finfo(self.dtype).min
420
+ inputs_lead_dim = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
421
+ sequence_length = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
422
+ if using_static_cache:
423
+ target_length = past_key_values.get_max_cache_shape()
424
+ elif isinstance(past_key_values, HybridCache):
425
+ target_length = past_key_values.get_max_cache_shape()
426
+ else:
427
+ target_length = (
428
+ attention_mask.shape[-1]
429
+ if isinstance(attention_mask, torch.Tensor)
430
+ else cache_position[0] + sequence_length + 1
431
+ )
432
+
433
+ if attention_mask is not None and attention_mask.dim() == 4:
434
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
435
+ return attention_mask
436
+
437
+ causal_mask = torch.full(
438
+ (sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device
439
+ )
440
+ # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below
441
+ if sequence_length != 1:
442
+ if is_training:
443
+ causal_mask = torch.triu(causal_mask, diagonal=1)
444
+ else:
445
+ causal_mask[:, :sequence_length] = 0.0
446
+
447
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
448
+ causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
449
+ if attention_mask is not None:
450
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
451
+ mask_length = attention_mask.shape[-1]
452
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
453
+ padding_mask = padding_mask == 0
454
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
455
+ padding_mask, min_dtype
456
+ )
457
+ # we are training thus we need to create a full mask on the image + prefix but causal on suffix
458
+ if is_training:
459
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
460
+ token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0
461
+ )
462
+ return causal_mask
463
+
464
+ def get_image_features(self, pixel_values: torch.FloatTensor, intrinsic: torch.FloatTensor):
465
+ """
466
+ Obtains image last hidden states from the vision tower and apply multimodal projection.
467
+
468
+ Args:
469
+ pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
470
+ The tensors corresponding to the input images.
471
+ Returns:
472
+ image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
473
+ """
474
+ # mintrinsic = intrinsic.reshape(-1, 3, 3)
475
+ # siglip vision tower
476
+ siglip_pixel_values = F.normalize(pixel_values, mean=SIGLIP_MEAN, std=SIGLIP_STD)
477
+ image_outputs = self.vision_tower(siglip_pixel_values)
478
+
479
+ # ego3d position encoding
480
+ if self.config.use_vision_zoe:
481
+ zoe_pixel_values, ph, pw = process_zoe(pixel_values, pad_mode="reflect")
482
+ with torch.no_grad():
483
+ pvh, pvw = pixel_values.shape[-2:]
484
+ depth = self.vision_zoe_model(pixel_values=zoe_pixel_values).predicted_depth
485
+ depth = torch.nn.functional.interpolate(
486
+ depth.unsqueeze(1),
487
+ size=(pvh+2*ph, pvw+2*pw),
488
+ mode="bicubic",
489
+ align_corners=True,
490
+ )[..., ph:-ph, pw:-pw]
491
+ # depth = torch.clamp(depth, 0., 4.0) # NOTE: we find that depth w/o clamp performs better
492
+ xyz = self.backproject_patch(
493
+ intrinsic, depth, patch_size=self.config.vision_config.patch_size, reso=self.config.ego3d_patch_reso
494
+ ) # (b, n, 3*4)
495
+ pos_embed_3d = self.position_embedding_3d(xyz)
496
+ selected_image_feature = image_outputs.last_hidden_state + pos_embed_3d
497
+ else:
498
+ selected_image_feature = image_outputs.last_hidden_state
499
+ image_features = self.multi_modal_projector(selected_image_feature)
500
+ image_features = image_features / (self.config.text_config.hidden_size**0.5)
501
+ return image_features
502
+
503
+ @add_start_docstrings_to_model_forward(PALIGEMMA_INPUTS_DOCSTRING)
504
+ @replace_return_docstrings(output_type=SpatialVLACausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
505
+ def forward(
506
+ self,
507
+ input_ids: torch.LongTensor = None,
508
+ pixel_values: torch.FloatTensor = None,
509
+ actions: Optional[torch.FloatTensor] = None,
510
+ intrinsic: Optional[torch.Tensor] = None,
511
+ attention_mask: Optional[torch.Tensor] = None,
512
+ position_ids: Optional[torch.LongTensor] = None,
513
+ past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
514
+ token_type_ids: Optional[torch.LongTensor] = None,
515
+ cache_position: Optional[torch.LongTensor] = None,
516
+ inputs_embeds: Optional[torch.FloatTensor] = None,
517
+ labels: Optional[torch.LongTensor] = None,
518
+ use_cache: Optional[bool] = None,
519
+ output_attentions: Optional[bool] = None,
520
+ output_hidden_states: Optional[bool] = None,
521
+ return_dict: Optional[bool] = None,
522
+ num_logits_to_keep: int = 0,
523
+ ) -> Union[Tuple, SpatialVLACausalLMOutputWithPast]:
524
+ r"""
525
+ Args:
526
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
527
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
528
+ config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
529
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
530
+
531
+ num_logits_to_keep (`int`, *optional*):
532
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
533
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
534
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
535
+
536
+ Returns:
537
+
538
+ Example:
539
+
540
+ ```python
541
+ >>> from PIL import Image
542
+ >>> import requests
543
+ >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
544
+
545
+ >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/PaliGemma-test-224px-hf")
546
+ >>> processor = AutoProcessor.from_pretrained("google/PaliGemma-test-224px-hf")
547
+
548
+ >>> prompt = "answer en Where is the cow standing?"
549
+ >>> url = "https://huggingface.co/gv-hf/PaliGemma-test-224px-hf/resolve/main/cow_beach_1.png"
550
+ >>> image = Image.open(requests.get(url, stream=True).raw)
551
+
552
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt")
553
+
554
+ >>> # Generate
555
+ >>> generate_ids = model.generate(**inputs, max_length=30)
556
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
557
+ "answer en Where is the cow standing?\nbeach"
558
+ ```"""
559
+ # print(f"**************************************\n \
560
+ # input_ids {input_ids} \n \
561
+ # labels {labels} \n \
562
+ # token_type_ids {token_type_ids} \n \
563
+ # attention_mask {attention_mask} \n \
564
+ # actions {actions} \n \
565
+ # **************************************"
566
+ # )
567
+ # print(f"model.language_model.config._attn_implementation {self.language_model.config._attn_implementation} model.config.vision_config._attn_implementation_internal {self.config.vision_config._attn_implementation_internal} \n \
568
+ # model.vision_tower.config._attn_implementation {self.vision_tower.config._attn_implementation} model.config.vision_config._attn_implementation_internal {self.config.vision_config._attn_implementation_internal}")
569
+ # __import__("ipdb").set_trace()
570
+ if (input_ids is None) ^ (inputs_embeds is not None):
571
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
572
+
573
+ if pixel_values is not None and inputs_embeds is not None:
574
+ raise ValueError(
575
+ "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
576
+ )
577
+
578
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
579
+ output_hidden_states = (
580
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
581
+ )
582
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
583
+
584
+ is_training = token_type_ids is not None and labels is not None
585
+
586
+ if inputs_embeds is None:
587
+ inputs_embeds = self.get_input_embeddings()(input_ids).clone() ## avoid checkpoint grad True
588
+
589
+ # NOTE: replace the fixed embeddings with trainable spatial embeddings
590
+ # BUG: LoRA causes inputs_embeds requires_grad = True
591
+ # peft: https://github.com/huggingface/peft/blob/ec92cdcc41fe1b141bfe1e0da69b38a7e601cc80/src/peft/peft_model.py#L687
592
+ # hf: https://github.com/huggingface/transformers/blob/05260a1fc1c8571a2b421ce72b680d5f1bc3e5a4/src/transformers/modeling_utils.py#L2545
593
+ # lora w/ prompt: https://discuss.huggingface.co/t/combine-between-lora-and-prompt-tunning/65151
594
+ if self.config.use_spatial_token:
595
+ spatial_selected = (input_ids >= self.config.action_token_begin_idx) & (input_ids < self.config.action_token_begin_idx + self.config.spatial_token_num)
596
+ inputs_embeds[spatial_selected] = inputs_embeds[spatial_selected] * 0.0 + self.spatial_embed_tokens(input_ids[spatial_selected] - self.config.action_token_begin_idx)
597
+
598
+ if cache_position is None:
599
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
600
+ cache_position = torch.arange(
601
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
602
+ )
603
+
604
+ if position_ids is None:
605
+ position_ids = cache_position.unsqueeze(0) + 1 # Paligemma positions are 1-indexed
606
+
607
+ # Merge text and images
608
+ if pixel_values is not None:
609
+ image_features = self.get_image_features(pixel_values, intrinsic)
610
+
611
+ special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
612
+ special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
613
+ if inputs_embeds[special_image_mask].numel() != image_features.numel():
614
+ image_tokens_in_text = torch.sum(input_ids == self.config.image_token_index)
615
+ raise ValueError(
616
+ f"Number of images does not match number of special image tokens in the input text. "
617
+ f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
618
+ "tokens from image embeddings."
619
+ )
620
+ image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
621
+ inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
622
+
623
+ # mask out pad-token-ids in labels for BC
624
+ if labels is not None and self.pad_token_id in labels:
625
+ logger.warning_once(
626
+ "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. ",
627
+ "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.",
628
+ )
629
+ labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)
630
+
631
+ causal_mask = self._update_causal_mask(
632
+ attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
633
+ )
634
+ outputs = self.language_model(
635
+ attention_mask=causal_mask,
636
+ position_ids=position_ids,
637
+ past_key_values=past_key_values,
638
+ inputs_embeds=inputs_embeds,
639
+ use_cache=use_cache,
640
+ output_attentions=output_attentions,
641
+ output_hidden_states=output_hidden_states,
642
+ return_dict=return_dict,
643
+ cache_position=cache_position,
644
+ num_logits_to_keep=num_logits_to_keep,
645
+ )
646
+
647
+ logits = outputs.logits
648
+ loss = None
649
+ if labels is not None:
650
+ # Upcast to float if we need to compute the loss to avoid potential precision issues
651
+ logits = logits.float()
652
+ shift_logits = logits[..., :-1, :]
653
+ shift_labels = labels[..., 1:]
654
+ if attention_mask is not None:
655
+ # we use the input attention mask to shift the logits and labels, because it is 2D.
656
+ # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
657
+ shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
658
+ shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
659
+ shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
660
+ else:
661
+ shift_logits = shift_logits.contiguous()
662
+ shift_labels = shift_labels.contiguous()
663
+ # Flatten the tokens
664
+ loss_fct = nn.CrossEntropyLoss()
665
+
666
+ flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
667
+ flat_labels = shift_labels.view(-1).to(shift_logits.device)
668
+ loss = loss_fct(flat_logits, flat_labels)
669
+ if not return_dict:
670
+ output = (logits,) + outputs[1:]
671
+ return (loss,) + output if loss is not None else output
672
+
673
+ return SpatialVLACausalLMOutputWithPast(
674
+ loss=loss,
675
+ logits=logits,
676
+ past_key_values=outputs.past_key_values,
677
+ hidden_states=outputs.hidden_states,
678
+ attentions=outputs.attentions,
679
+ image_hidden_states=image_features if pixel_values is not None else None,
680
+ )
681
+
682
+ def prepare_inputs_for_generation(
683
+ self,
684
+ input_ids,
685
+ past_key_values=None,
686
+ inputs_embeds=None,
687
+ cache_position=None,
688
+ position_ids=None,
689
+ pixel_values=None,
690
+ intrinsic=None,
691
+ attention_mask=None,
692
+ token_type_ids=None,
693
+ use_cache=True,
694
+ num_logits_to_keep=None,
695
+ labels=None,
696
+ **kwargs,
697
+ ):
698
+ # Overwritten -- custom `position_ids` and `pixel_values` handling
699
+ model_inputs = self.language_model.prepare_inputs_for_generation(
700
+ input_ids,
701
+ past_key_values=past_key_values,
702
+ inputs_embeds=inputs_embeds,
703
+ attention_mask=attention_mask,
704
+ position_ids=position_ids,
705
+ cache_position=cache_position,
706
+ use_cache=use_cache,
707
+ num_logits_to_keep=num_logits_to_keep,
708
+ token_type_ids=token_type_ids,
709
+ **kwargs,
710
+ )
711
+
712
+ # position_ids in Paligemma are 1-indexed
713
+ if model_inputs.get("position_ids") is not None:
714
+ model_inputs["position_ids"] += 1
715
+ # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
716
+ # Otherwise we need pixel values to be passed to model. NOTE: use_cache=False needs pixel_values always
717
+ if cache_position[0] == 0:
718
+ model_inputs["pixel_values"] = pixel_values
719
+ is_training = token_type_ids is not None and labels is not None
720
+ if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
721
+ causal_mask = self._update_causal_mask(
722
+ attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
723
+ )
724
+ model_inputs["attention_mask"] = causal_mask
725
+ model_inputs["intrinsic"] = intrinsic
726
+ return model_inputs
727
+
728
+ @torch.no_grad()
729
+ def predict_action(
730
+ self,
731
+ model_inputs,
732
+ ) -> torch.Tensor:
733
+ model_inputs = model_inputs.to(torch.bfloat16).to(self.device)
734
+ input_len = model_inputs["input_ids"].shape[-1]
735
+ generation_outputs = self.generate(**model_inputs, max_new_tokens=256, do_sample=False)
736
+ return generation_outputs[:,input_len:]
737
+
738
+ @classmethod
739
+ def from_pretrained(
740
+ cls,
741
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
742
+ *model_args,
743
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
744
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
745
+ ignore_mismatched_sizes: bool = False,
746
+ force_download: bool = False,
747
+ local_files_only: bool = False,
748
+ token: Optional[Union[str, bool]] = None,
749
+ revision: str = "main",
750
+ use_safetensors: Optional[bool] = None,
751
+ weights_only: bool = True,
752
+ **kwargs,
753
+ ):
754
+ model = super().from_pretrained(
755
+ pretrained_model_name_or_path,
756
+ *model_args,
757
+ config=config,
758
+ cache_dir=cache_dir,
759
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
760
+ force_download=force_download,
761
+ local_files_only=local_files_only,
762
+ token=token,
763
+ revision=revision,
764
+ use_safetensors=use_safetensors,
765
+ weights_only=weights_only,
766
+ **kwargs,
767
+ )
768
+ # NOTE: tie the weights of the embed_tokens with lm head (does not work if weights are untied)
769
+ # model.language_model.tie_weights()
770
+ # NOTE: tie the data of spatial_embed_tokens with embed_tokens (BUG: for weight sync issue in training)
771
+ if model.config.use_spatial_token:
772
+ model.language_model.model.embed_tokens.weight.data[-model.config.spatial_token_num:] = model.spatial_embed_tokens.weight.data
773
+ return model
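For orientation, a minimal inference sketch built on the classes above (the checkpoint path, observation image, prompt, and `unnorm_key` are placeholders, and it assumes the repo's `auto_map` exposes this class through `AutoModel` with `trust_remote_code=True`):

```python
# Hedged sketch: load this checkpoint via remote code and decode one action chunk.
import torch
from PIL import Image
from transformers import AutoModel, AutoProcessor

ckpt = "path/to/this/checkpoint"  # placeholder path, not a real model id
processor = AutoProcessor.from_pretrained(ckpt, trust_remote_code=True)
model = AutoModel.from_pretrained(ckpt, torch_dtype=torch.bfloat16, trust_remote_code=True).eval()

image = Image.open("observation.png")  # placeholder robot observation
prompt = "What action should the robot take to pick up the cup?"
inputs = processor(images=[image], text=prompt, unnorm_key="bridge_orig/1.0.0", return_tensors="pt")  # illustrative unnorm_key

action_ids = model.predict_action(inputs)  # greedy-decoded token ids emitted after the prompt
result = processor.decode_actions(action_ids, unnorm_key="bridge_orig/1.0.0")
print(result["actions"])  # (action_chunk_size, action_dim) un-normalized actions
```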
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_spatialvla.SpatialVLAProcessor"
4
+ },
5
+ "do_convert_rgb": null,
6
+ "do_normalize": false,
7
+ "do_rescale": true,
8
+ "do_resize": true,
9
+ "image_mean": [
10
+ 0.5,
11
+ 0.5,
12
+ 0.5
13
+ ],
14
+ "image_processor_type": "SiglipImageProcessor",
15
+ "image_seq_length": 256,
16
+ "image_std": [
17
+ 0.5,
18
+ 0.5,
19
+ 0.5
20
+ ],
21
+ "processor_class": "SpatialVLAProcessor",
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "height": 224,
26
+ "width": 224
27
+ }
28
+ }
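Read plainly, this config resizes images to 224×224 with bicubic resampling (`resample: 3`) and rescales pixels by 1/255 (≈ 0.00392157) without mean/std normalization; the SigLIP normalization (mean = std = 0.5) is applied later inside `get_image_features`. A rough hand-written equivalent, as a sketch rather than the actual `SiglipImageProcessor` implementation:

```python
# Sketch of the preprocessing implied by preprocessor_config.json (illustrative only).
import numpy as np
from PIL import Image

def preprocess(img: Image.Image) -> np.ndarray:
    img = img.convert("RGB").resize((224, 224), Image.BICUBIC)   # "size": 224x224, "resample": 3 (bicubic)
    x = np.asarray(img, dtype=np.float32) * 0.00392156862745098  # "rescale_factor" = 1/255 -> values in [0, 1]
    return x.transpose(2, 0, 1)                                  # channels-first (c, h, w); do_normalize is false here
```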
processing_spatialvla.py ADDED
@@ -0,0 +1,439 @@
1
+ # MIT License
2
+ # Copyright (c) 2025 IPEC at Shanghai AI Laboratory
3
+ # Permission is hereby granted, free of charge, to use, copy, modify, merge, publish,
4
+ # distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
5
+ # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
7
+ # Based on code licensed under the Apache License, Version 2.0 by Google Inc. and HuggingFace Inc. team (Copyright 2024).
8
+ # coding=utf-8
9
+
10
+ """
11
+ Processor class for PaliGemma.
12
+ """
13
+
14
+ import logging
15
+ from typing import List, Optional, Union, Dict
16
+ import torch
17
+ import numpy as np
18
+
19
+ from transformers.feature_extraction_utils import BatchFeature
20
+ from transformers.image_utils import ImageInput, is_valid_image
21
+ from transformers.processing_utils import (
22
+ ImagesKwargs,
23
+ ProcessingKwargs,
24
+ ProcessorMixin,
25
+ TextKwargs,
26
+ Unpack,
27
+ _validate_images_text_input_order,
28
+ )
29
+ from transformers.tokenization_utils_base import (
30
+ AddedToken,
31
+ PreTokenizedInput,
32
+ TextInput,
33
+ )
34
+ from transformers.utils import logging
35
+ from .action_tokenizer import SphericalCoordinateActionTokenizer
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ IMAGE_TOKEN = "<image>"
40
+ EXTRA_TOKENS = [f"<loc{i:0>4}>" for i in range(1024)] + [f"<seg{i:0>3}>" for i in range(128)]
41
+
42
+
43
+ class PaliGemmaTextKwargs(TextKwargs):
44
+ suffix: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]
45
+
46
+
47
+ class PaliGemmaImagesKwargs(ImagesKwargs):
48
+ do_convert_rgb: Optional[bool]
49
+
50
+
51
+ class PaliGemmaProcessorKwargs(ProcessingKwargs, total=False):
52
+ text_kwargs: PaliGemmaTextKwargs
53
+ images_kwargs: PaliGemmaImagesKwargs
54
+ _defaults = {
55
+ "text_kwargs": {
56
+ "padding": False,
57
+ },
58
+ "images_kwargs": {
59
+ "data_format": "channels_first",
60
+ },
61
+ }
62
+
63
+
64
+ # Copied from transformers.models.idefics2.processing_idefics2.is_url
65
+ def is_url(val) -> bool:
66
+ return isinstance(val, str) and val.startswith("http")
67
+
68
+
69
+ # Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url
70
+ def is_image_or_image_url(elem):
71
+ return is_url(elem) or is_valid_image(elem)
72
+
73
+
74
+ def _is_str_or_image(elem):
75
+ return isinstance(elem, (str)) or is_image_or_image_url(elem)
76
+
77
+
78
+ def build_string_from_input(prompt, bos_token, image_seq_len, image_token, num_images):
79
+ """
80
+ Builds a string from the input prompt and image tokens.
81
+ For example, for the call:
82
+ build_string_from_input(
83
+ prompt="Prefix str"
84
+ bos_token="<s>",
85
+ image_seq_len=3,
86
+ image_token="<im>",
87
+ )
88
+ The output will be:
89
+ "<im><im><im><s>Initial str"
90
+ Args:
91
+ prompt (`List[Union[str, ImageInput]]`): The input prompt.
92
+ bos_token (`str`): The beginning of sentence token.
93
+ image_seq_len (`int`): The length of the image sequence.
94
+ image_token (`str`): The image token.
95
+ num_images (`int`): Number of images in the prompt.
96
+ """
97
+ return f"{image_token * image_seq_len * num_images}{bos_token}{prompt}\n"
98
+
99
+
100
+ # Copied from transformers.models.llava_next.image_processing_llava_next.make_batched_images
101
+ def make_batched_images(images) -> List[List[ImageInput]]:
102
+ """
103
+ Accepts images in list or nested list format, and makes a list of images for preprocessing.
104
+
105
+ Args:
106
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
107
+ The input image.
108
+
109
+ Returns:
110
+ list: A list of images.
111
+ """
112
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
113
+ return [img for img_list in images for img in img_list]
114
+
115
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
116
+ return images
117
+
118
+ elif is_valid_image(images):
119
+ return [images]
120
+
121
+ raise ValueError(f"Could not make batched video from {images}")
122
+
123
+
124
+ class SpatialVLAProcessor(ProcessorMixin):
125
+ r"""
126
+ Constructs a PaliGemma processor which wraps a PaliGemma image processor and a PaliGemma tokenizer into a single processor.
127
+
128
+ [`PaliGemmaProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`LlamaTokenizerFast`]. See the
129
+ [`~PaliGemmaProcessor.__call__`] and [`~PaliGemmaProcessor.decode`] for more information.
130
+
131
+ Args:
132
+ image_processor ([`SiglipImageProcessor`], *optional*):
133
+ The image processor is a required input.
134
+ tokenizer ([`LlamaTokenizerFast`], *optional*):
135
+ The tokenizer is a required input.
136
+ chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
137
+ in a chat into a tokenizable string.
138
+ """
139
+
140
+ attributes = ["image_processor", "tokenizer"]
141
+ valid_kwargs = ["chat_template"]
142
+ image_processor_class = "SiglipImageProcessor"
143
+ tokenizer_class = ("GemmaTokenizer", "GemmaTokenizerFast")
144
+
145
+ def __init__(
146
+ self,
147
+ image_processor=None,
148
+ tokenizer=None,
149
+ chat_template=None,
150
+ statistics: Optional[dict] = None,
151
+ bin_policy=None,
152
+ intrinsic_config=None,
153
+ action_config=None,
154
+ num_obs_steps=1,
155
+ obs_delta=1,
156
+ action_chunk_size=1,
157
+ min_sigma=0.0,
158
+ **kwargs,
159
+ ):
160
+ if image_processor is None:
161
+ raise ValueError("You need to specify an `image_processor`.")
162
+ if tokenizer is None:
163
+ raise ValueError("You need to specify a `tokenizer`.")
164
+ if not hasattr(image_processor, "image_seq_length"):
165
+ raise ValueError("Image processor is missing an `image_seq_length` attribute.")
166
+
167
+ self.image_seq_length = image_processor.image_seq_length
168
+
169
+ if not hasattr(tokenizer, "image_token"):
170
+ image_token = AddedToken(IMAGE_TOKEN, normalized=False, special=True)
171
+ tokens_to_add = {"additional_special_tokens": [image_token]}
172
+ tokenizer.add_special_tokens(tokens_to_add)
173
+ self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
174
+ else:
175
+ self.image_token_id = tokenizer.image_token_id
176
+
177
+ tokenizer.add_tokens(EXTRA_TOKENS)
178
+ tokenizer.add_bos_token = False
179
+ tokenizer.add_eos_token = False
180
+
181
+ super().__init__(image_processor, tokenizer, chat_template=chat_template)
182
+
183
+ # action tokenizer
184
+ self.statistics = statistics if statistics else {}
185
+ self.bin_policy = bin_policy
186
+ self.min_sigma = min_sigma
187
+ self.intrinsic_config = intrinsic_config
188
+ self.action_config = action_config
189
+ self.num_obs_steps = num_obs_steps
190
+ self.obs_delta = obs_delta
191
+ self.action_chunk_size = action_chunk_size
192
+ self.dataset_intrinsics = {}
193
+ height, width = image_processor.size["height"], image_processor.size["width"]
194
+
195
+ for k, v in intrinsic_config.items():
196
+ K = torch.tensor(v["intrinsic"]).float()
197
+ h, w = v["height"], v["width"]
198
+ K[0, 0] *= width / w
199
+ K[1, 1] *= height / h
200
+ K[0, 2] *= width / w
201
+ K[1, 2] *= height / h
202
+ self.dataset_intrinsics[k] = K
203
+ print(f"scale intrinsic of {k} from {v['intrinsic']} to {K} ...")
204
+
205
+ self.action_tokenizer = SphericalCoordinateActionTokenizer(
206
+ tokenizer=tokenizer, num_bins=action_config["num_bins"],
207
+ bin_policy=bin_policy, use_spherical=action_config["use_spherical"],
208
+ min_sigma=min_sigma,
209
+ )
210
+
211
+ def __call__(
212
+ self,
213
+ images: ImageInput = None,
214
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
215
+ audio=None,
216
+ videos=None,
217
+ unnorm_key: Optional[str] = None,
218
+ suffix_actions: Optional[np.array] = None, # (t e)
219
+ **kwargs: Unpack[PaliGemmaProcessorKwargs],
220
+ ) -> BatchFeature:
221
+ """
222
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
223
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
224
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
225
+ SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
226
+ of the above two methods for more information.
227
+
228
+ The usage for PaliGemma fine-tuning preparation is slightly different than usual. suffix passed are suffixes to
229
+ the prompt in `text`, and will be placed after the prompt. This is because attention is handled differently for
230
+ the prefix and the suffix. For instance,
231
+ ```python
232
+ image = PIL_cow_image
233
+ prompt = "answer en Where is the cow standing?"
234
+ suffix = "on the beach"
235
+ inputs = processor(text=prompt, images=image, suffix=suffix)
236
+ ```
237
+ Here `inputs` will contain the `input_ids` and `token_type_ids` that follow
238
+ ```python
239
+ inputs["input_ids"][:, 256:]
240
+ # tensor([[ 2, 6006, 603, 573, 13910, 9980, 235336, 108, 477, 573, 8318]])
241
+ inputs["token_type_ids"][:, 256:]
242
+ tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]])
243
+ ```
244
+ Meaning the last three tokens are of "label" ("suffix") type while the other ones are of "prefix" type.
245
+
246
+
247
+ Args:
248
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
249
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
250
+ tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
251
+ number of channels, H and W are image height and width.
252
+ text (`str`, `List[str]`, `List[List[str]]`):
253
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
254
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
255
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
256
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
257
+ If set, will return tensors of a particular framework. Acceptable values are:
258
+
259
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
260
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
261
+ - `'np'`: Return NumPy `np.ndarray` objects.
262
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
263
+ suffix (`str`, `List[str]`, `List[List[str]]`):
264
+ The suffixes or batch of suffixes to be encoded. Only necessary for finetuning. See https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md
265
+ for more information. If your prompt is "<image> What is on the image", the suffix corresponds to the expected prediction "a cow sitting on a bench".
266
+
267
+ Returns:
268
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
269
+
270
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix`
271
+ is provided, the `input_ids` will also contain the suffix input ids.
272
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
273
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
274
+ `None`).
275
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
276
+ - **labels** -- Labels compatible with training if `suffix` is not None
277
+ """
278
+ # check if images and text inputs are reversed for BC
279
+ images, text = _validate_images_text_input_order(images, text)
280
+
281
+ output_kwargs = self._merge_kwargs(
282
+ PaliGemmaProcessorKwargs,
283
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
284
+ **kwargs,
285
+ )
286
+ if suffix_actions is not None:
287
+ action_tokens = self.action_tokenizer(suffix_actions) # (n,3)
288
+ suffix="".join(action_tokens.flatten())
289
+ else:
290
+ suffix = output_kwargs["text_kwargs"].pop("suffix", None)
291
+
292
+ return_token_type_ids = True if suffix is not None else False
293
+
294
+ if images is None:
295
+ raise ValueError("`images` are expected as arguments to a `PaliGemmaProcessor` instance.")
296
+ if text is None:
297
+ logger.warning_once(
298
+ "You are using PaliGemma without a text prefix. It will perform as a picture-captioning model."
299
+ )
300
+ text = ""
301
+
302
+ if _is_str_or_image(text):
303
+ text = [text]
304
+ elif isinstance(text, list) and _is_str_or_image(text[0]):
305
+ pass
306
+
307
+ if text is not None and images is not None:
308
+ if not any(IMAGE_TOKEN in sample for sample in text):
309
+ # logger.warning(
310
+ # "You are passing both `text` and `images` to `PaliGemmaProcessor`. The processor expects special "
311
+ # "image tokens in the text, as many tokens as there are images per each text. It is recommended to "
312
+ # "add `<image>` tokens in the very beginning of your text. For this call, we will infer how many images "
313
+ # "each text has and add special tokens."
314
+ # )
315
+ if isinstance(text, List) and isinstance(images, List):
316
+ if len(images) != len(text):
317
+ raise ValueError(
318
+ f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image or list of images."
319
+ )
320
+
321
+ # make a nested list of lists to be able to iterate over the images and text below
322
+ if is_valid_image(images):
323
+ images = [[images]]
324
+ elif isinstance(images, list) and is_valid_image(images[0]):
325
+ images = [[image] for image in images]
326
+ elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
327
+ raise ValueError("images must be an image, list of images or list of list of images")
328
+
329
+ if suffix is not None and _is_str_or_image(suffix):
330
+ suffix = [suffix]
331
+ if suffix is not None:
332
+ suffix = [sfx + self.tokenizer.eos_token for sfx in suffix]
333
+
334
+ input_strings = [
335
+ build_string_from_input(
336
+ prompt=prompt,
337
+ bos_token=self.tokenizer.bos_token,
338
+ image_seq_len=self.image_seq_length,
339
+ image_token=IMAGE_TOKEN,
340
+ num_images=len(image_list) if isinstance(image_list, list) else 1,
341
+ )
342
+ for prompt, image_list in zip(text, images)
343
+ ]
344
+ images = make_batched_images(images)
345
+ else:
346
+ expanded_samples = []
347
+ for sample in text:
348
+ expanded_sample = sample.replace(IMAGE_TOKEN, IMAGE_TOKEN * self.image_seq_length)
349
+ bos_rfind_index = expanded_sample.rfind(IMAGE_TOKEN)
350
+ bos_index = bos_rfind_index + len(IMAGE_TOKEN) if bos_rfind_index != -1 else 0
351
+ expanded_sample = (
352
+ expanded_sample[:bos_index] + self.tokenizer.bos_token + expanded_sample[bos_index:]
353
+ )
354
+ expanded_samples.append(expanded_sample)
355
+ input_strings = [f"{sample}\n" for sample in expanded_samples]
356
+ pixel_values = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"]
357
+
358
+ # max_length has to account for the image tokens
359
+ if output_kwargs["text_kwargs"].get("max_length", None) is not None:
360
+ output_kwargs["text_kwargs"]["max_length"] += self.image_seq_length
361
+
362
+ inputs = self.tokenizer(
363
+ input_strings,
364
+ text_pair=suffix,
365
+ return_token_type_ids=return_token_type_ids,
366
+ **output_kwargs["text_kwargs"],
367
+ )
368
+
369
+ intrinsic = self.dataset_intrinsics[unnorm_key] if unnorm_key in self.dataset_intrinsics else self.dataset_intrinsics["default"]
370
+ return_data = {**inputs, "pixel_values": pixel_values, "intrinsic": intrinsic}
371
+
372
+ if return_token_type_ids:
373
+ labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
374
+ return_data.update({"labels": labels})
375
+ return BatchFeature(data=return_data)
376
+
377
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Gemma
378
+ def batch_decode(self, *args, **kwargs):
379
+ """
380
+ This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
381
+ refer to the docstring of this method for more information.
382
+ """
383
+ return self.tokenizer.batch_decode(*args, **kwargs)
384
+
385
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Gemma
386
+ def decode(self, *args, **kwargs):
387
+ """
388
+ This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
389
+ the docstring of this method for more information.
390
+ """
391
+ return self.tokenizer.decode(*args, **kwargs)
392
+
393
+ @property
394
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->PaliGemma
395
+ def model_input_names(self):
396
+ tokenizer_input_names = self.tokenizer.model_input_names
397
+ image_processor_input_names = self.image_processor.model_input_names
398
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
399
+
400
+ def decode_actions(
401
+ self,
402
+ generation_outputs: torch.Tensor,
403
+ unnorm_key: Optional[str] = None,
404
+ ) -> Dict[str, torch.Tensor]:
405
+ action_token_num = 3 # translation + rotation + gripper
406
+ predicted_action_token_ids = generation_outputs[0, : action_token_num * self.action_chunk_size].detach().cpu().long().numpy()
407
+ assert self.tokenizer.eos_token_id != predicted_action_token_ids[-1], "[error] actions contain EOS token, please check your truncation settings!"
408
+
409
+ if predicted_action_token_ids.shape[0] < action_token_num * self.action_chunk_size: # pad with zeros
410
+ print(f"[warning] Padding zero action!")
411
+ predicted_action_token_ids = np.concatenate(
412
+ [
413
+ predicted_action_token_ids,
414
+ np.zeros(action_token_num * self.action_chunk_size - predicted_action_token_ids.shape[0], dtype=np.longlong),
415
+ ]
416
+ )
417
+ predicted_action_token_ids = predicted_action_token_ids.reshape(-1, action_token_num)
418
+ normalized_action_chunks = self.action_tokenizer.decode_token_ids_to_actions(predicted_action_token_ids)
419
+
420
+ # Unnormalize actions
421
+ if unnorm_key is None:
422
+ print(f"🔥 unnorm_key {unnorm_key} is not in statistics, use next one")
423
+ unnorm_key = next(iter(self.statistics.keys()))
424
+ action_norm_stats = self.statistics[unnorm_key]["action"]
425
+
426
+ action_dim = len(action_norm_stats["q01"])
427
+ mask = np.array(action_norm_stats.get("mask", np.ones(action_dim)), dtype=bool)
428
+ action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"])
429
+
430
+ actions = []
431
+ for normalized_actions in normalized_action_chunks:
432
+ action = np.where(
433
+ mask,
434
+ 0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low,
435
+ normalized_actions,
436
+ )
437
+ actions.append(action)
438
+ actions = np.stack(actions)
439
+ return {"actions": actions, "action_ids": predicted_action_token_ids}
processor_config.json ADDED
@@ -0,0 +1,3702 @@
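processor_config.json below bundles the processor-side configuration: the spatial action tokenizer settings (action_config and the bin_policy edges), per-dataset camera intrinsics, and the per-dataset action statistics consumed by decode_actions. A minimal sketch of inspecting it (assuming the file has been downloaded locally as processor_config.json):

import json

with open("processor_config.json") as f:
    cfg = json.load(f)

print(cfg["action_chunk_size"])                          # 4 action steps per chunk
print(cfg["action_config"]["num_bins"]["translation"])   # spherical (r, theta, phi) bin counts
print(list(cfg["statistics"].keys())[:3])                # datasets with q01/q99 normalization stats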
1
+ {
2
+ "action_chunk_size": 4,
3
+ "action_config": {
4
+ "distribution": "gaussian",
5
+ "num_bins": {
6
+ "gripper": 2,
7
+ "rotation": {
8
+ "pitch_bins": 16,
9
+ "roll_bins": 16,
10
+ "yaw_bins": 16
11
+ },
12
+ "total": 8194,
13
+ "translation": {
14
+ "phi_bins": 32,
15
+ "r_bins": 8,
16
+ "theta_bins": 16
17
+ }
18
+ },
19
+ "use_spherical": true
20
+ },
21
+ "auto_map": {
22
+ "AutoProcessor": "processing_spatialvla.SpatialVLAProcessor"
23
+ },
24
+ "bin_policy": {
25
+ "rotation": {
26
+ "pitch_bins": [
27
+ -1.0,
28
+ -0.6785015894338633,
29
+ -0.516796358161167,
30
+ -0.3978678314258641,
31
+ -0.29907867426319246,
32
+ -0.21158608510441518,
33
+ -0.13081651669135252,
34
+ -0.05392877158612959,
35
+ 0.02113881590329744,
36
+ 0.0961313749999302,
37
+ 0.17278161860263358,
38
+ 0.25310821063971767,
39
+ 0.33985580585203445,
40
+ 0.4373796767941653,
41
+ 0.5539451994131283,
42
+ 0.7100308525313351,
43
+ 0.9999999999999999
44
+ ],
45
+ "roll_bins": [
46
+ -1.0,
47
+ -0.7121298287894609,
48
+ -0.5564581819056097,
49
+ -0.440071773405789,
50
+ -0.3426461358467384,
51
+ -0.25595819395001274,
52
+ -0.17566893098554964,
53
+ -0.09904102149491184,
54
+ -0.024059205927849478,
55
+ 0.05100802578115137,
56
+ 0.12790631705350436,
57
+ 0.20869987492610076,
58
+ 0.2962359118858219,
59
+ 0.3951018734752948,
60
+ 0.5141779624401348,
61
+ 0.6762450862353777,
62
+ 1.0
63
+ ],
64
+ "yaw_bins": [
65
+ -1.0,
66
+ -0.6910047644696934,
67
+ -0.5313988287371314,
68
+ -0.4133376866679583,
69
+ -0.3150057290436059,
70
+ -0.22777658299365705,
71
+ -0.14715771012527992,
72
+ -0.07034330907230311,
73
+ 0.004712965738136004,
74
+ 0.07975252682496348,
75
+ 0.15651401950954372,
76
+ 0.23703420508371892,
77
+ 0.32409736463921823,
78
+ 0.4221473708283458,
79
+ 0.5396818128475004,
80
+ 0.6980345545587262,
81
+ 1.0
82
+ ]
83
+ },
84
+ "translation": {
85
+ "phi_bins": [
86
+ -3.1415926535897927,
87
+ -2.5597806593194092,
88
+ -2.1899702111786126,
89
+ -1.9071489188814448,
90
+ -1.6724463283141142,
91
+ -1.4683467869586326,
92
+ -1.2853487663890668,
93
+ -1.1176672338183495,
94
+ -0.961484031585327,
95
+ -0.8141204989748655,
96
+ -0.6736024210639718,
97
+ -0.5384120746595923,
98
+ -0.40733740832383114,
99
+ -0.279375002438531,
100
+ -0.15366425283265983,
101
+ -0.029440234757304742,
102
+ 0.0940021938080639,
103
+ 0.2173378027339352,
104
+ 0.34123726674747146,
105
+ 0.46639302836823826,
106
+ 0.5935473848733163,
107
+ 0.7235258808185444,
108
+ 0.857280204661428,
109
+ 0.9959469801163238,
110
+ 1.1409329906705301,
111
+ 1.2940454053271015,
112
+ 1.4577019170652383,
113
+ 1.6352913749303837,
114
+ 1.8318407243899377,
115
+ 2.0553733807372363,
116
+ 2.320069275631962,
117
+ 2.6552436426949604,
118
+ 3.141592653589793
119
+ ],
120
+ "r_bins": [
121
+ 2.220446049250313e-16,
122
+ 0.19677118231539265,
123
+ 0.3506298590504556,
124
+ 0.4881976731379496,
125
+ 0.621970275186659,
126
+ 0.7620978861167458,
127
+ 0.9228346010157172,
128
+ 1.1393317208802278,
129
+ 1.7320508075688767
130
+ ],
131
+ "theta_bins": [
132
+ 0.0,
133
+ 0.7067187338585303,
134
+ 0.9814199309359143,
135
+ 1.1752042640550222,
136
+ 1.3331175751173345,
137
+ 1.4713205387280388,
138
+ 1.5977846301055496,
139
+ 1.7172771763957553,
140
+ 1.8331248472067783,
141
+ 1.9480194771467687,
142
+ 2.0644993054216925,
143
+ 2.1853608246107656,
144
+ 2.314189357400805,
145
+ 2.456314355008026,
146
+ 2.621028843347318,
147
+ 2.828352346005421,
148
+ 3.141592653589793
149
+ ]
150
+ }
151
+ },
152
+ "intrinsic_config": {
153
+ "bridge_orig/1.0.0": {
154
+ "height": 480,
155
+ "intrinsic": [
156
+ [
157
+ 623.588,
158
+ 0,
159
+ 319.501
160
+ ],
161
+ [
162
+ 0,
163
+ 623.588,
164
+ 239.545
165
+ ],
166
+ [
167
+ 0,
168
+ 0,
169
+ 1
170
+ ]
171
+ ],
172
+ "width": 640
173
+ },
174
+ "default": {
175
+ "height": 480,
176
+ "intrinsic": [
177
+ [
178
+ 623.588,
179
+ 0,
180
+ 319.501
181
+ ],
182
+ [
183
+ 0,
184
+ 623.588,
185
+ 239.545
186
+ ],
187
+ [
188
+ 0,
189
+ 0,
190
+ 1
191
+ ]
192
+ ],
193
+ "width": 640
194
+ }
195
+ },
196
+ "min_sigma": 0.0,
197
+ "num_obs_steps": 1,
198
+ "obs_delta": 1,
199
+ "processor_class": "SpatialVLAProcessor",
200
+ "statistics": {
201
+ "fractal20220817_data/0.1.0": {
202
+ "action": {
203
+ "mean": [
204
+ 0.006987507455050945,
205
+ 0.006265853065997362,
206
+ -0.012625162489712238,
207
+ 0.04333285242319107,
208
+ -0.005756276659667492,
209
+ 0.0009130403632298112,
210
+ 0.5354204773902893
211
+ ],
212
+ "std": [
213
+ 0.06921109557151794,
214
+ 0.05970889702439308,
215
+ 0.0735311210155487,
216
+ 0.1561058759689331,
217
+ 0.1316441297531128,
218
+ 0.14593777060508728,
219
+ 0.49711623787879944
220
+ ],
221
+ "max": [
222
+ 2.9984593391418457,
223
+ 22.09052848815918,
224
+ 2.7507524490356445,
225
+ 1.570636510848999,
226
+ 1.5321086645126343,
227
+ 1.5691522359848022,
228
+ 1.0
229
+ ],
230
+ "min": [
231
+ -2.0204520225524902,
232
+ -5.497899532318115,
233
+ -2.031663417816162,
234
+ -1.569917917251587,
235
+ -1.569892168045044,
236
+ -1.570419430732727,
237
+ 0.0
238
+ ],
239
+ "q01": [
240
+ -0.22453527510166169,
241
+ -0.14820013284683228,
242
+ -0.231589707583189,
243
+ -0.3517994859814644,
244
+ -0.4193011274933815,
245
+ -0.43643461108207704,
246
+ 0.0
247
+ ],
248
+ "q99": [
249
+ 0.17824687153100965,
250
+ 0.14938379630446405,
251
+ 0.21842354819178575,
252
+ 0.5892666035890578,
253
+ 0.35272657424211445,
254
+ 0.44796681255102094,
255
+ 1.0
256
+ ],
257
+ "mask": [
258
+ true,
259
+ true,
260
+ true,
261
+ true,
262
+ true,
263
+ true,
264
+ false
265
+ ]
266
+ },
267
+ "proprio": {
268
+ "mean": [
269
+ 0.0,
270
+ 0.0,
271
+ 0.0,
272
+ 0.0,
273
+ 0.0,
274
+ 0.0,
275
+ 0.0
276
+ ],
277
+ "std": [
278
+ 0.0,
279
+ 0.0,
280
+ 0.0,
281
+ 0.0,
282
+ 0.0,
283
+ 0.0,
284
+ 0.0
285
+ ],
286
+ "max": [
287
+ 0.0,
288
+ 0.0,
289
+ 0.0,
290
+ 0.0,
291
+ 0.0,
292
+ 0.0,
293
+ 0.0
294
+ ],
295
+ "min": [
296
+ 0.0,
297
+ 0.0,
298
+ 0.0,
299
+ 0.0,
300
+ 0.0,
301
+ 0.0,
302
+ 0.0
303
+ ],
304
+ "q01": [
305
+ 0.0,
306
+ 0.0,
307
+ 0.0,
308
+ 0.0,
309
+ 0.0,
310
+ 0.0,
311
+ 0.0
312
+ ],
313
+ "q99": [
314
+ 0.0,
315
+ 0.0,
316
+ 0.0,
317
+ 0.0,
318
+ 0.0,
319
+ 0.0,
320
+ 0.0
321
+ ]
322
+ },
323
+ "num_transitions": 3786400,
324
+ "num_trajectories": 87212
325
+ },
326
+ "kuka/0.1.0": {
327
+ "action": {
328
+ "mean": [
329
+ -0.00046687963185831904,
330
+ 0.00040137648466043174,
331
+ -0.0012807906605303288,
332
+ 0.0,
333
+ 0.0,
334
+ -0.037225183099508286,
335
+ 0.4131543040275574
336
+ ],
337
+ "std": [
338
+ 0.020832739770412445,
339
+ 0.029158642515540123,
340
+ 0.0642285868525505,
341
+ 0.0,
342
+ 0.0,
343
+ 0.14224639534950256,
344
+ 0.4908643662929535
345
+ ],
346
+ "max": [
347
+ 0.1697135865688324,
348
+ 0.2777623236179352,
349
+ 0.43710532784461975,
350
+ 0.0,
351
+ 0.0,
352
+ 1.9684287309646606,
353
+ 1.0
354
+ ],
355
+ "min": [
356
+ -0.159867063164711,
357
+ -0.2892282009124756,
358
+ -0.2795473635196686,
359
+ 0.0,
360
+ 0.0,
361
+ -1.9875637292861938,
362
+ 0.0
363
+ ],
364
+ "q01": [
365
+ -0.06619441494345665,
366
+ -0.08713878810405731,
367
+ -0.15083016991615295,
368
+ 0.0,
369
+ 0.0,
370
+ -0.5415697038173676,
371
+ 0.0
372
+ ],
373
+ "q99": [
374
+ 0.06601839080452929,
375
+ 0.08732476785779003,
376
+ 0.18168179214000715,
377
+ 0.0,
378
+ 0.0,
379
+ 0.2923380345106127,
380
+ 1.0
381
+ ],
382
+ "mask": [
383
+ true,
384
+ true,
385
+ true,
386
+ true,
387
+ true,
388
+ true,
389
+ false
390
+ ]
391
+ },
392
+ "proprio": {
393
+ "mean": [
394
+ 0.0,
395
+ 0.0,
396
+ 0.0,
397
+ 0.0,
398
+ 0.0,
399
+ 0.0,
400
+ 0.0
401
+ ],
402
+ "std": [
403
+ 0.0,
404
+ 0.0,
405
+ 0.0,
406
+ 0.0,
407
+ 0.0,
408
+ 0.0,
409
+ 0.0
410
+ ],
411
+ "max": [
412
+ 0.0,
413
+ 0.0,
414
+ 0.0,
415
+ 0.0,
416
+ 0.0,
417
+ 0.0,
418
+ 0.0
419
+ ],
420
+ "min": [
421
+ 0.0,
422
+ 0.0,
423
+ 0.0,
424
+ 0.0,
425
+ 0.0,
426
+ 0.0,
427
+ 0.0
428
+ ],
429
+ "q01": [
430
+ 0.0,
431
+ 0.0,
432
+ 0.0,
433
+ 0.0,
434
+ 0.0,
435
+ 0.0,
436
+ 0.0
437
+ ],
438
+ "q99": [
439
+ 0.0,
440
+ 0.0,
441
+ 0.0,
442
+ 0.0,
443
+ 0.0,
444
+ 0.0,
445
+ 0.0
446
+ ]
447
+ },
448
+ "num_transitions": 2455879,
449
+ "num_trajectories": 209880
450
+ },
451
+ "bridge_orig/1.0.0": {
452
+ "action": {
453
+ "mean": [
454
+ 0.00023341714404523373,
455
+ 0.00013004327774979174,
456
+ -0.00012762591359205544,
457
+ -0.0001556579809403047,
458
+ -0.00040393328526988626,
459
+ 0.00023558337124995887,
460
+ 0.5764582753181458
461
+ ],
462
+ "std": [
463
+ 0.009765734896063805,
464
+ 0.013689505867660046,
465
+ 0.012667152099311352,
466
+ 0.028534479439258575,
467
+ 0.03063790127635002,
468
+ 0.07691770792007446,
469
+ 0.4973658621311188
470
+ ],
471
+ "max": [
472
+ 0.41691166162490845,
473
+ 0.25864794850349426,
474
+ 0.21218234300613403,
475
+ 3.122201919555664,
476
+ 1.8618112802505493,
477
+ 6.280478477478027,
478
+ 1.0
479
+ ],
480
+ "min": [
481
+ -0.4007510244846344,
482
+ -0.13874775171279907,
483
+ -0.22553899884223938,
484
+ -3.2010786533355713,
485
+ -1.8618112802505493,
486
+ -6.279075622558594,
487
+ 0.0
488
+ ],
489
+ "q01": [
490
+ -0.02872725307941437,
491
+ -0.04170349963009357,
492
+ -0.026093858778476715,
493
+ -0.08092105075716972,
494
+ -0.09288699507713317,
495
+ -0.20718276381492615,
496
+ 0.0
497
+ ],
498
+ "q99": [
499
+ 0.028309678435325586,
500
+ 0.040855254605412394,
501
+ 0.040161586627364146,
502
+ 0.08192047759890528,
503
+ 0.07792850524187081,
504
+ 0.20382574498653397,
505
+ 1.0
506
+ ],
507
+ "mask": [
508
+ true,
509
+ true,
510
+ true,
511
+ true,
512
+ true,
513
+ true,
514
+ false
515
+ ]
516
+ },
517
+ "proprio": {
518
+ "mean": [
519
+ 0.0,
520
+ 0.0,
521
+ 0.0,
522
+ 0.0,
523
+ 0.0,
524
+ 0.0,
525
+ 0.0
526
+ ],
527
+ "std": [
528
+ 0.0,
529
+ 0.0,
530
+ 0.0,
531
+ 0.0,
532
+ 0.0,
533
+ 0.0,
534
+ 0.0
535
+ ],
536
+ "max": [
537
+ 0.0,
538
+ 0.0,
539
+ 0.0,
540
+ 0.0,
541
+ 0.0,
542
+ 0.0,
543
+ 0.0
544
+ ],
545
+ "min": [
546
+ 0.0,
547
+ 0.0,
548
+ 0.0,
549
+ 0.0,
550
+ 0.0,
551
+ 0.0,
552
+ 0.0
553
+ ],
554
+ "q01": [
555
+ 0.0,
556
+ 0.0,
557
+ 0.0,
558
+ 0.0,
559
+ 0.0,
560
+ 0.0,
561
+ 0.0
562
+ ],
563
+ "q99": [
564
+ 0.0,
565
+ 0.0,
566
+ 0.0,
567
+ 0.0,
568
+ 0.0,
569
+ 0.0,
570
+ 0.0
571
+ ]
572
+ },
573
+ "num_transitions": 2135463,
574
+ "num_trajectories": 60064
575
+ },
576
+ "taco_play/0.1.0": {
577
+ "action": {
578
+ "mean": [
579
+ -0.0038459226489067078,
580
+ 0.009671436622738838,
581
+ 0.01278059184551239,
582
+ -0.0054037850350141525,
583
+ -0.009606562554836273,
584
+ -0.0024807206355035305,
585
+ 0.4263913035392761
586
+ ],
587
+ "std": [
588
+ 0.23254045844078064,
589
+ 0.3629826307296753,
590
+ 0.2869291603565216,
591
+ 0.261770635843277,
592
+ 0.24388927221298218,
593
+ 0.5216501355171204,
594
+ 0.49469029903411865
595
+ ],
596
+ "max": [
597
+ 1.4915844202041626,
598
+ 2.1842432022094727,
599
+ 2.6836395263671875,
600
+ 5.035226821899414,
601
+ 2.665864944458008,
602
+ 4.250768661499023,
603
+ 1.0
604
+ ],
605
+ "min": [
606
+ -4.242457866668701,
607
+ -3.192805051803589,
608
+ -1.3371467590332031,
609
+ -4.202683448791504,
610
+ -2.6722638607025146,
611
+ -3.3467135429382324,
612
+ 0.0
613
+ ],
614
+ "q01": [
615
+ -0.7106140398979186,
616
+ -1.056944659948349,
617
+ -0.5878450274467468,
618
+ -0.7682853937149048,
619
+ -0.7180147767066956,
620
+ -1.5527938604354858,
621
+ 0.0
622
+ ],
623
+ "q99": [
624
+ 0.6482916426658629,
625
+ 1.0051310062408447,
626
+ 0.9480248689651489,
627
+ 0.6926478147506714,
628
+ 0.6351067513227462,
629
+ 1.628010264635086,
630
+ 1.0
631
+ ],
632
+ "mask": [
633
+ true,
634
+ true,
635
+ true,
636
+ true,
637
+ true,
638
+ true,
639
+ false
640
+ ]
641
+ },
642
+ "proprio": {
643
+ "mean": [
644
+ 0.0,
645
+ 0.0,
646
+ 0.0,
647
+ 0.0,
648
+ 0.0,
649
+ 0.0,
650
+ 0.0
651
+ ],
652
+ "std": [
653
+ 0.0,
654
+ 0.0,
655
+ 0.0,
656
+ 0.0,
657
+ 0.0,
658
+ 0.0,
659
+ 0.0
660
+ ],
661
+ "max": [
662
+ 0.0,
663
+ 0.0,
664
+ 0.0,
665
+ 0.0,
666
+ 0.0,
667
+ 0.0,
668
+ 0.0
669
+ ],
670
+ "min": [
671
+ 0.0,
672
+ 0.0,
673
+ 0.0,
674
+ 0.0,
675
+ 0.0,
676
+ 0.0,
677
+ 0.0
678
+ ],
679
+ "q01": [
680
+ 0.0,
681
+ 0.0,
682
+ 0.0,
683
+ 0.0,
684
+ 0.0,
685
+ 0.0,
686
+ 0.0
687
+ ],
688
+ "q99": [
689
+ 0.0,
690
+ 0.0,
691
+ 0.0,
692
+ 0.0,
693
+ 0.0,
694
+ 0.0,
695
+ 0.0
696
+ ]
697
+ },
698
+ "num_transitions": 237798,
699
+ "num_trajectories": 3603
700
+ },
701
+ "jaco_play/0.1.0": {
702
+ "action": {
703
+ "mean": [
704
+ 0.0009658387862145901,
705
+ -0.005800850689411163,
706
+ -0.003950685728341341,
707
+ 0.0,
708
+ 0.0,
709
+ 0.0,
710
+ 0.34934908151626587
711
+ ],
712
+ "std": [
713
+ 0.12234985828399658,
714
+ 0.09678783267736435,
715
+ 0.1115543395280838,
716
+ 0.0,
717
+ 0.0,
718
+ 0.0,
719
+ 0.47682321071624756
720
+ ],
721
+ "max": [
722
+ 0.20000000298023224,
723
+ 0.20000000298023224,
724
+ 0.20000000298023224,
725
+ 0.0,
726
+ 0.0,
727
+ 0.0,
728
+ 1.0
729
+ ],
730
+ "min": [
731
+ -0.20000000298023224,
732
+ -0.20000000298023224,
733
+ -0.20000000298023224,
734
+ 0.0,
735
+ 0.0,
736
+ 0.0,
737
+ 0.0
738
+ ],
739
+ "q01": [
740
+ -0.20000000298023224,
741
+ -0.20000000298023224,
742
+ -0.20000000298023224,
743
+ 0.0,
744
+ 0.0,
745
+ 0.0,
746
+ 0.0
747
+ ],
748
+ "q99": [
749
+ 0.20000000298023224,
750
+ 0.20000000298023224,
751
+ 0.20000000298023224,
752
+ 0.0,
753
+ 0.0,
754
+ 0.0,
755
+ 1.0
756
+ ],
757
+ "mask": [
758
+ true,
759
+ true,
760
+ true,
761
+ true,
762
+ true,
763
+ true,
764
+ false
765
+ ]
766
+ },
767
+ "proprio": {
768
+ "mean": [
769
+ 0.0,
770
+ 0.0,
771
+ 0.0,
772
+ 0.0,
773
+ 0.0,
774
+ 0.0,
775
+ 0.0
776
+ ],
777
+ "std": [
778
+ 0.0,
779
+ 0.0,
780
+ 0.0,
781
+ 0.0,
782
+ 0.0,
783
+ 0.0,
784
+ 0.0
785
+ ],
786
+ "max": [
787
+ 0.0,
788
+ 0.0,
789
+ 0.0,
790
+ 0.0,
791
+ 0.0,
792
+ 0.0,
793
+ 0.0
794
+ ],
795
+ "min": [
796
+ 0.0,
797
+ 0.0,
798
+ 0.0,
799
+ 0.0,
800
+ 0.0,
801
+ 0.0,
802
+ 0.0
803
+ ],
804
+ "q01": [
805
+ 0.0,
806
+ 0.0,
807
+ 0.0,
808
+ 0.0,
809
+ 0.0,
810
+ 0.0,
811
+ 0.0
812
+ ],
813
+ "q99": [
814
+ 0.0,
815
+ 0.0,
816
+ 0.0,
817
+ 0.0,
818
+ 0.0,
819
+ 0.0,
820
+ 0.0
821
+ ]
822
+ },
823
+ "num_transitions": 77965,
824
+ "num_trajectories": 1085
825
+ },
826
+ "berkeley_cable_routing/0.1.0": {
827
+ "action": {
828
+ "mean": [
829
+ -0.07139858603477478,
830
+ 0.023608991876244545,
831
+ 0.10241956263780594,
832
+ 0.0,
833
+ 0.0,
834
+ 0.04967105761170387,
835
+ 0.0
836
+ ],
837
+ "std": [
838
+ 0.18155010044574738,
839
+ 0.18109896779060364,
840
+ 0.21220752596855164,
841
+ 0.0,
842
+ 0.0,
843
+ 0.3475516438484192,
844
+ 0.0
845
+ ],
846
+ "max": [
847
+ 0.9633283019065857,
848
+ 1.0,
849
+ 1.0,
850
+ 0.0,
851
+ 0.0,
852
+ 1.0,
853
+ 0.0
854
+ ],
855
+ "min": [
856
+ -0.9809081554412842,
857
+ -0.9554349184036255,
858
+ -0.9994775056838989,
859
+ 0.0,
860
+ 0.0,
861
+ -1.0,
862
+ 0.0
863
+ ],
864
+ "q01": [
865
+ -0.5534318816661835,
866
+ -0.4797285574674606,
867
+ -0.5314934802055359,
868
+ 0.0,
869
+ 0.0,
870
+ -0.8855219376087189,
871
+ 0.0
872
+ ],
873
+ "q99": [
874
+ 0.42652835428714786,
875
+ 0.5000944086909298,
876
+ 0.639823433756829,
877
+ 0.0,
878
+ 0.0,
879
+ 0.984243879914284,
880
+ 0.0
881
+ ],
882
+ "mask": [
883
+ true,
884
+ true,
885
+ true,
886
+ true,
887
+ true,
888
+ true,
889
+ false
890
+ ]
891
+ },
892
+ "proprio": {
893
+ "mean": [
894
+ 0.0,
895
+ 0.0,
896
+ 0.0,
897
+ 0.0,
898
+ 0.0,
899
+ 0.0,
900
+ 0.0
901
+ ],
902
+ "std": [
903
+ 0.0,
904
+ 0.0,
905
+ 0.0,
906
+ 0.0,
907
+ 0.0,
908
+ 0.0,
909
+ 0.0
910
+ ],
911
+ "max": [
912
+ 0.0,
913
+ 0.0,
914
+ 0.0,
915
+ 0.0,
916
+ 0.0,
917
+ 0.0,
918
+ 0.0
919
+ ],
920
+ "min": [
921
+ 0.0,
922
+ 0.0,
923
+ 0.0,
924
+ 0.0,
925
+ 0.0,
926
+ 0.0,
927
+ 0.0
928
+ ],
929
+ "q01": [
930
+ 0.0,
931
+ 0.0,
932
+ 0.0,
933
+ 0.0,
934
+ 0.0,
935
+ 0.0,
936
+ 0.0
937
+ ],
938
+ "q99": [
939
+ 0.0,
940
+ 0.0,
941
+ 0.0,
942
+ 0.0,
943
+ 0.0,
944
+ 0.0,
945
+ 0.0
946
+ ]
947
+ },
948
+ "num_transitions": 42328,
949
+ "num_trajectories": 1647
950
+ },
951
+ "roboturk/0.1.0": {
952
+ "action": {
953
+ "mean": [
954
+ 0.001444889116100967,
955
+ -0.0015945355407893658,
956
+ -0.0011753803119063377,
957
+ 0.002301239175722003,
958
+ -0.0009382442804053426,
959
+ -0.00011485860886750743,
960
+ 0.5746025443077087
961
+ ],
962
+ "std": [
963
+ 0.0493537075817585,
964
+ 0.06354564428329468,
965
+ 0.06116492301225662,
966
+ 0.0955340564250946,
967
+ 0.08420011401176453,
968
+ 0.06517910957336426,
969
+ 0.4945177137851715
970
+ ],
971
+ "max": [
972
+ 0.39124172925949097,
973
+ 0.4601028263568878,
974
+ 0.4870833456516266,
975
+ 1.816888689994812,
976
+ 1.8240282535552979,
977
+ 1.4824820756912231,
978
+ 1.0
979
+ ],
980
+ "min": [
981
+ -0.6546999216079712,
982
+ -0.6365841031074524,
983
+ -0.4217723608016968,
984
+ -1.6695482730865479,
985
+ -1.8023357391357422,
986
+ -1.4630827903747559,
987
+ 0.0
988
+ ],
989
+ "q01": [
990
+ -0.1342635464668274,
991
+ -0.19996687173843383,
992
+ -0.1482972100377083,
993
+ -0.20720748245716095,
994
+ -0.09676413893699647,
995
+ -0.18075634717941286,
996
+ 0.0
997
+ ],
998
+ "q99": [
999
+ 0.14956976801157001,
1000
+ 0.1805950567126275,
1001
+ 0.18841815620660796,
1002
+ 0.21615413755178453,
1003
+ 0.09457383215427405,
1004
+ 0.18543301910162005,
1005
+ 1.0
1006
+ ],
1007
+ "mask": [
1008
+ true,
1009
+ true,
1010
+ true,
1011
+ true,
1012
+ true,
1013
+ true,
1014
+ false
1015
+ ]
1016
+ },
1017
+ "proprio": {
1018
+ "mean": [
1019
+ 0.0,
1020
+ 0.0,
1021
+ 0.0,
1022
+ 0.0,
1023
+ 0.0,
1024
+ 0.0,
1025
+ 0.0
1026
+ ],
1027
+ "std": [
1028
+ 0.0,
1029
+ 0.0,
1030
+ 0.0,
1031
+ 0.0,
1032
+ 0.0,
1033
+ 0.0,
1034
+ 0.0
1035
+ ],
1036
+ "max": [
1037
+ 0.0,
1038
+ 0.0,
1039
+ 0.0,
1040
+ 0.0,
1041
+ 0.0,
1042
+ 0.0,
1043
+ 0.0
1044
+ ],
1045
+ "min": [
1046
+ 0.0,
1047
+ 0.0,
1048
+ 0.0,
1049
+ 0.0,
1050
+ 0.0,
1051
+ 0.0,
1052
+ 0.0
1053
+ ],
1054
+ "q01": [
1055
+ 0.0,
1056
+ 0.0,
1057
+ 0.0,
1058
+ 0.0,
1059
+ 0.0,
1060
+ 0.0,
1061
+ 0.0
1062
+ ],
1063
+ "q99": [
1064
+ 0.0,
1065
+ 0.0,
1066
+ 0.0,
1067
+ 0.0,
1068
+ 0.0,
1069
+ 0.0,
1070
+ 0.0
1071
+ ]
1072
+ },
1073
+ "num_transitions": 187507,
1074
+ "num_trajectories": 1995
1075
+ },
1076
+ "viola/0.1.0": {
1077
+ "action": {
1078
+ "mean": [
1079
+ 0.04761853069067001,
1080
+ -0.029204534366726875,
1081
+ 0.055867329239845276,
1082
+ -0.0026185200549662113,
1083
+ 0.006867341697216034,
1084
+ -0.016821356490254402,
1085
+ 0.7323777675628662
1086
+ ],
1087
+ "std": [
1088
+ 0.39157867431640625,
1089
+ 0.40765219926834106,
1090
+ 0.40077903866767883,
1091
+ 0.10023998469114304,
1092
+ 0.08443189412355423,
1093
+ 0.10375089943408966,
1094
+ 0.442600816488266
1095
+ ],
1096
+ "max": [
1097
+ 1.0,
1098
+ 1.0,
1099
+ 1.0,
1100
+ 0.375,
1101
+ 0.36321428418159485,
1102
+ 0.375,
1103
+ 1.0
1104
+ ],
1105
+ "min": [
1106
+ -1.0,
1107
+ -1.0,
1108
+ -1.0,
1109
+ -0.375,
1110
+ -0.375,
1111
+ -0.375,
1112
+ 0.0
1113
+ ],
1114
+ "q01": [
1115
+ -0.9628571271896362,
1116
+ -1.0,
1117
+ -1.0,
1118
+ -0.26249998807907104,
1119
+ -0.21321429312229156,
1120
+ -0.3385714292526245,
1121
+ 0.0
1122
+ ],
1123
+ "q99": [
1124
+ 0.9114285707473755,
1125
+ 0.868571400642395,
1126
+ 1.0,
1127
+ 0.2817857265472412,
1128
+ 0.2239285707473755,
1129
+ 0.3557142913341522,
1130
+ 1.0
1131
+ ],
1132
+ "mask": [
1133
+ true,
1134
+ true,
1135
+ true,
1136
+ true,
1137
+ true,
1138
+ true,
1139
+ false
1140
+ ]
1141
+ },
1142
+ "proprio": {
1143
+ "mean": [
1144
+ 0.0,
1145
+ 0.0,
1146
+ 0.0,
1147
+ 0.0,
1148
+ 0.0,
1149
+ 0.0,
1150
+ 0.0
1151
+ ],
1152
+ "std": [
1153
+ 0.0,
1154
+ 0.0,
1155
+ 0.0,
1156
+ 0.0,
1157
+ 0.0,
1158
+ 0.0,
1159
+ 0.0
1160
+ ],
1161
+ "max": [
1162
+ 0.0,
1163
+ 0.0,
1164
+ 0.0,
1165
+ 0.0,
1166
+ 0.0,
1167
+ 0.0,
1168
+ 0.0
1169
+ ],
1170
+ "min": [
1171
+ 0.0,
1172
+ 0.0,
1173
+ 0.0,
1174
+ 0.0,
1175
+ 0.0,
1176
+ 0.0,
1177
+ 0.0
1178
+ ],
1179
+ "q01": [
1180
+ 0.0,
1181
+ 0.0,
1182
+ 0.0,
1183
+ 0.0,
1184
+ 0.0,
1185
+ 0.0,
1186
+ 0.0
1187
+ ],
1188
+ "q99": [
1189
+ 0.0,
1190
+ 0.0,
1191
+ 0.0,
1192
+ 0.0,
1193
+ 0.0,
1194
+ 0.0,
1195
+ 0.0
1196
+ ]
1197
+ },
1198
+ "num_transitions": 76324,
1199
+ "num_trajectories": 150
1200
+ },
1201
+ "berkeley_autolab_ur5/0.1.0": {
1202
+ "action": {
1203
+ "mean": [
1204
+ 0.0005683613708242774,
1205
+ 0.0012176961172372103,
1206
+ -0.0005296385497786105,
1207
+ 0.00021029777417425066,
1208
+ 6.069485243642703e-05,
1209
+ 0.0012049867073073983,
1210
+ 0.6298308372497559
1211
+ ],
1212
+ "std": [
1213
+ 0.011533073149621487,
1214
+ 0.007990497164428234,
1215
+ 0.009577799588441849,
1216
+ 0.009432999417185783,
1217
+ 0.016427574679255486,
1218
+ 0.011054049246013165,
1219
+ 0.482679545879364
1220
+ ],
1221
+ "max": [
1222
+ 0.019999999552965164,
1223
+ 0.019999999552965164,
1224
+ 0.019999999552965164,
1225
+ 0.06666667014360428,
1226
+ 0.06666667014360428,
1227
+ 0.06666667014360428,
1228
+ 1.0
1229
+ ],
1230
+ "min": [
1231
+ -0.019999999552965164,
1232
+ -0.019999999552965164,
1233
+ -0.019999999552965164,
1234
+ -0.06666667014360428,
1235
+ -0.06666667014360428,
1236
+ -0.06666667014360428,
1237
+ 0.0
1238
+ ],
1239
+ "q01": [
1240
+ -0.019999999552965164,
1241
+ -0.019999999552965164,
1242
+ -0.019999999552965164,
1243
+ -0.02628571353852749,
1244
+ -0.06666667014360428,
1245
+ -0.03847619146108627,
1246
+ 0.0
1247
+ ],
1248
+ "q99": [
1249
+ 0.019999999552965164,
1250
+ 0.019999999552965164,
1251
+ 0.019999999552965164,
1252
+ 0.031809523701667786,
1253
+ 0.06666667014360428,
1254
+ 0.036571428179740906,
1255
+ 1.0
1256
+ ],
1257
+ "mask": [
1258
+ true,
1259
+ true,
1260
+ true,
1261
+ true,
1262
+ true,
1263
+ true,
1264
+ false
1265
+ ]
1266
+ },
1267
+ "proprio": {
1268
+ "mean": [
1269
+ 0.0,
1270
+ 0.0,
1271
+ 0.0,
1272
+ 0.0,
1273
+ 0.0,
1274
+ 0.0,
1275
+ 0.0
1276
+ ],
1277
+ "std": [
1278
+ 0.0,
1279
+ 0.0,
1280
+ 0.0,
1281
+ 0.0,
1282
+ 0.0,
1283
+ 0.0,
1284
+ 0.0
1285
+ ],
1286
+ "max": [
1287
+ 0.0,
1288
+ 0.0,
1289
+ 0.0,
1290
+ 0.0,
1291
+ 0.0,
1292
+ 0.0,
1293
+ 0.0
1294
+ ],
1295
+ "min": [
1296
+ 0.0,
1297
+ 0.0,
1298
+ 0.0,
1299
+ 0.0,
1300
+ 0.0,
1301
+ 0.0,
1302
+ 0.0
1303
+ ],
1304
+ "q01": [
1305
+ 0.0,
1306
+ 0.0,
1307
+ 0.0,
1308
+ 0.0,
1309
+ 0.0,
1310
+ 0.0,
1311
+ 0.0
1312
+ ],
1313
+ "q99": [
1314
+ 0.0,
1315
+ 0.0,
1316
+ 0.0,
1317
+ 0.0,
1318
+ 0.0,
1319
+ 0.0,
1320
+ 0.0
1321
+ ]
1322
+ },
1323
+ "num_transitions": 97939,
1324
+ "num_trajectories": 1000
1325
+ },
1326
+ "toto/0.1.0": {
1327
+ "action": {
1328
+ "mean": [
1329
+ 0.3854214549064636,
1330
+ 0.007769507821649313,
1331
+ 0.3632742166519165,
1332
+ -0.665202796459198,
1333
+ 0.1890396624803543,
1334
+ 0.0329875648021698,
1335
+ 0.0
1336
+ ],
1337
+ "std": [
1338
+ 0.12211630493402481,
1339
+ 0.19378569722175598,
1340
+ 0.10178232192993164,
1341
+ 0.5725256204605103,
1342
+ 0.298846036195755,
1343
+ 0.32599160075187683,
1344
+ 0.0
1345
+ ],
1346
+ "max": [
1347
+ 0.6839867234230042,
1348
+ 0.4454185664653778,
1349
+ 0.7984078526496887,
1350
+ 2.120781660079956,
1351
+ 1.371164321899414,
1352
+ 1.4118704795837402,
1353
+ 0.0
1354
+ ],
1355
+ "min": [
1356
+ 0.09922284632921219,
1357
+ -0.5180193781852722,
1358
+ 0.13791072368621826,
1359
+ -2.635117530822754,
1360
+ -1.0734480619430542,
1361
+ -1.9282547235488892,
1362
+ 0.0
1363
+ ],
1364
+ "q01": [
1365
+ 0.1756722891330719,
1366
+ -0.3077590811252594,
1367
+ 0.235383919775486,
1368
+ -2.0908505964279174,
1369
+ -0.6191593289375306,
1370
+ -0.7488683319091797,
1371
+ 0.0
1372
+ ],
1373
+ "q99": [
1374
+ 0.6136963081359863,
1375
+ 0.33704194784164443,
1376
+ 0.6681221985816956,
1377
+ 0.7422861719131538,
1378
+ 0.7955395007133507,
1379
+ 0.740464625358582,
1380
+ 0.0
1381
+ ],
1382
+ "mask": [
1383
+ true,
1384
+ true,
1385
+ true,
1386
+ true,
1387
+ true,
1388
+ true,
1389
+ false
1390
+ ]
1391
+ },
1392
+ "proprio": {
1393
+ "mean": [
1394
+ 0.0,
1395
+ 0.0,
1396
+ 0.0,
1397
+ 0.0,
1398
+ 0.0,
1399
+ 0.0,
1400
+ 0.0
1401
+ ],
1402
+ "std": [
1403
+ 0.0,
1404
+ 0.0,
1405
+ 0.0,
1406
+ 0.0,
1407
+ 0.0,
1408
+ 0.0,
1409
+ 0.0
1410
+ ],
1411
+ "max": [
1412
+ 0.0,
1413
+ 0.0,
1414
+ 0.0,
1415
+ 0.0,
1416
+ 0.0,
1417
+ 0.0,
1418
+ 0.0
1419
+ ],
1420
+ "min": [
1421
+ 0.0,
1422
+ 0.0,
1423
+ 0.0,
1424
+ 0.0,
1425
+ 0.0,
1426
+ 0.0,
1427
+ 0.0
1428
+ ],
1429
+ "q01": [
1430
+ 0.0,
1431
+ 0.0,
1432
+ 0.0,
1433
+ 0.0,
1434
+ 0.0,
1435
+ 0.0,
1436
+ 0.0
1437
+ ],
1438
+ "q99": [
1439
+ 0.0,
1440
+ 0.0,
1441
+ 0.0,
1442
+ 0.0,
1443
+ 0.0,
1444
+ 0.0,
1445
+ 0.0
1446
+ ]
1447
+ },
1448
+ "num_transitions": 325699,
1449
+ "num_trajectories": 1003
1450
+ },
1451
+ "language_table/0.1.0": {
1452
+ "action": {
1453
+ "mean": [
1454
+ 0.00014891766477376223,
1455
+ -0.0005636657006107271,
1456
+ 0.0,
1457
+ 0.0,
1458
+ 0.0,
1459
+ 0.0,
1460
+ 1.0
1461
+ ],
1462
+ "std": [
1463
+ 0.030162859708070755,
1464
+ 0.04230763390660286,
1465
+ 0.0,
1466
+ 0.0,
1467
+ 0.0,
1468
+ 0.0,
1469
+ 0.0
1470
+ ],
1471
+ "max": [
1472
+ 0.23357294499874115,
1473
+ 0.24496802687644958,
1474
+ 0.0,
1475
+ 0.0,
1476
+ 0.0,
1477
+ 0.0,
1478
+ 1.0
1479
+ ],
1480
+ "min": [
1481
+ -0.21989956498146057,
1482
+ -0.23736150562763214,
1483
+ 0.0,
1484
+ 0.0,
1485
+ 0.0,
1486
+ 0.0,
1487
+ 1.0
1488
+ ],
1489
+ "q01": [
1490
+ -0.08179590478539467,
1491
+ -0.11795833334326744,
1492
+ 0.0,
1493
+ 0.0,
1494
+ 0.0,
1495
+ 0.0,
1496
+ 1.0
1497
+ ],
1498
+ "q99": [
1499
+ 0.08822273463010788,
1500
+ 0.1191693339496851,
1501
+ 0.0,
1502
+ 0.0,
1503
+ 0.0,
1504
+ 0.0,
1505
+ 1.0
1506
+ ],
1507
+ "mask": [
1508
+ true,
1509
+ true,
1510
+ true,
1511
+ true,
1512
+ true,
1513
+ true,
1514
+ false
1515
+ ]
1516
+ },
1517
+ "proprio": {
1518
+ "mean": [
1519
+ 0.0,
1520
+ 0.0,
1521
+ 0.0,
1522
+ 0.0,
1523
+ 0.0,
1524
+ 0.0,
1525
+ 0.0
1526
+ ],
1527
+ "std": [
1528
+ 0.0,
1529
+ 0.0,
1530
+ 0.0,
1531
+ 0.0,
1532
+ 0.0,
1533
+ 0.0,
1534
+ 0.0
1535
+ ],
1536
+ "max": [
1537
+ 0.0,
1538
+ 0.0,
1539
+ 0.0,
1540
+ 0.0,
1541
+ 0.0,
1542
+ 0.0,
1543
+ 0.0
1544
+ ],
1545
+ "min": [
1546
+ 0.0,
1547
+ 0.0,
1548
+ 0.0,
1549
+ 0.0,
1550
+ 0.0,
1551
+ 0.0,
1552
+ 0.0
1553
+ ],
1554
+ "q01": [
1555
+ 0.0,
1556
+ 0.0,
1557
+ 0.0,
1558
+ 0.0,
1559
+ 0.0,
1560
+ 0.0,
1561
+ 0.0
1562
+ ],
1563
+ "q99": [
1564
+ 0.0,
1565
+ 0.0,
1566
+ 0.0,
1567
+ 0.0,
1568
+ 0.0,
1569
+ 0.0,
1570
+ 0.0
1571
+ ]
1572
+ },
1573
+ "num_transitions": 7045476,
1574
+ "num_trajectories": 442226
1575
+ },
1576
+ "stanford_hydra_dataset_converted_externally_to_rlds/0.1.0": {
1577
+ "action": {
1578
+ "mean": [
1579
+ 0.0007790043600834906,
1580
+ 0.00013707877951674163,
1581
+ -0.000254859565757215,
1582
+ 0.0012903243768960238,
1583
+ -0.004751724191009998,
1584
+ 0.002692892448976636,
1585
+ 0.48855218291282654
1586
+ ],
1587
+ "std": [
1588
+ 0.008022183552384377,
1589
+ 0.009131456725299358,
1590
+ 0.00957438349723816,
1591
+ 0.04122224077582359,
1592
+ 0.03843001648783684,
1593
+ 0.046067025512456894,
1594
+ 0.49978113174438477
1595
+ ],
1596
+ "max": [
1597
+ 0.02499854564666748,
1598
+ 0.02499903365969658,
1599
+ 0.024999922141432762,
1600
+ 0.24974457919597626,
1601
+ 0.24997030198574066,
1602
+ 0.24999946355819702,
1603
+ 1.0
1604
+ ],
1605
+ "min": [
1606
+ -0.024999044835567474,
1607
+ -0.024999700486660004,
1608
+ -0.02499929815530777,
1609
+ -0.24993225932121277,
1610
+ -0.2499666064977646,
1611
+ -0.2499932497739792,
1612
+ 0.0
1613
+ ],
1614
+ "q01": [
1615
+ -0.019992006458342076,
1616
+ -0.02415412735193968,
1617
+ -0.022941758055239916,
1618
+ -0.11085530579090118,
1619
+ -0.12024572037160397,
1620
+ -0.13314770206809043,
1621
+ 0.0
1622
+ ],
1623
+ "q99": [
1624
+ 0.022886231057345868,
1625
+ 0.022358838934451335,
1626
+ 0.02410089675337076,
1627
+ 0.12370114490389822,
1628
+ 0.11323311634361738,
1629
+ 0.18474749639630164,
1630
+ 1.0
1631
+ ],
1632
+ "mask": [
1633
+ true,
1634
+ true,
1635
+ true,
1636
+ true,
1637
+ true,
1638
+ true,
1639
+ false
1640
+ ]
1641
+ },
1642
+ "proprio": {
1643
+ "mean": [
1644
+ 0.0,
1645
+ 0.0,
1646
+ 0.0,
1647
+ 0.0,
1648
+ 0.0,
1649
+ 0.0,
1650
+ 0.0
1651
+ ],
1652
+ "std": [
1653
+ 0.0,
1654
+ 0.0,
1655
+ 0.0,
1656
+ 0.0,
1657
+ 0.0,
1658
+ 0.0,
1659
+ 0.0
1660
+ ],
1661
+ "max": [
1662
+ 0.0,
1663
+ 0.0,
1664
+ 0.0,
1665
+ 0.0,
1666
+ 0.0,
1667
+ 0.0,
1668
+ 0.0
1669
+ ],
1670
+ "min": [
1671
+ 0.0,
1672
+ 0.0,
1673
+ 0.0,
1674
+ 0.0,
1675
+ 0.0,
1676
+ 0.0,
1677
+ 0.0
1678
+ ],
1679
+ "q01": [
1680
+ 0.0,
1681
+ 0.0,
1682
+ 0.0,
1683
+ 0.0,
1684
+ 0.0,
1685
+ 0.0,
1686
+ 0.0
1687
+ ],
1688
+ "q99": [
1689
+ 0.0,
1690
+ 0.0,
1691
+ 0.0,
1692
+ 0.0,
1693
+ 0.0,
1694
+ 0.0,
1695
+ 0.0
1696
+ ]
1697
+ },
1698
+ "num_transitions": 358234,
1699
+ "num_trajectories": 570
1700
+ },
1701
+ "austin_buds_dataset_converted_externally_to_rlds/0.1.0": {
1702
+ "action": {
1703
+ "mean": [
1704
+ -0.07678329944610596,
1705
+ 0.0036849123425781727,
1706
+ 0.05644941329956055,
1707
+ 0.0,
1708
+ 0.0,
1709
+ 0.0,
1710
+ 0.3510494828224182
1711
+ ],
1712
+ "std": [
1713
+ 0.6367746591567993,
1714
+ 0.3788914680480957,
1715
+ 0.47796377539634705,
1716
+ 0.0,
1717
+ 0.0,
1718
+ 0.0,
1719
+ 0.4772108495235443
1720
+ ],
1721
+ "max": [
1722
+ 1.0,
1723
+ 1.0,
1724
+ 1.0,
1725
+ 0.0,
1726
+ 0.0,
1727
+ 0.0,
1728
+ 1.0
1729
+ ],
1730
+ "min": [
1731
+ -1.0,
1732
+ -1.0,
1733
+ -1.0,
1734
+ 0.0,
1735
+ 0.0,
1736
+ 0.0,
1737
+ 0.0
1738
+ ],
1739
+ "q01": [
1740
+ -1.0,
1741
+ -0.9599999785423279,
1742
+ -0.8714285492897034,
1743
+ 0.0,
1744
+ 0.0,
1745
+ 0.0,
1746
+ 0.0
1747
+ ],
1748
+ "q99": [
1749
+ 1.0,
1750
+ 0.8600000143051147,
1751
+ 1.0,
1752
+ 0.0,
1753
+ 0.0,
1754
+ 0.0,
1755
+ 1.0
1756
+ ],
1757
+ "mask": [
1758
+ true,
1759
+ true,
1760
+ true,
1761
+ true,
1762
+ true,
1763
+ true,
1764
+ false
1765
+ ]
1766
+ },
1767
+ "proprio": {
1768
+ "mean": [
1769
+ 0.0,
1770
+ 0.0,
1771
+ 0.0,
1772
+ 0.0,
1773
+ 0.0,
1774
+ 0.0,
1775
+ 0.0
1776
+ ],
1777
+ "std": [
1778
+ 0.0,
1779
+ 0.0,
1780
+ 0.0,
1781
+ 0.0,
1782
+ 0.0,
1783
+ 0.0,
1784
+ 0.0
1785
+ ],
1786
+ "max": [
1787
+ 0.0,
1788
+ 0.0,
1789
+ 0.0,
1790
+ 0.0,
1791
+ 0.0,
1792
+ 0.0,
1793
+ 0.0
1794
+ ],
1795
+ "min": [
1796
+ 0.0,
1797
+ 0.0,
1798
+ 0.0,
1799
+ 0.0,
1800
+ 0.0,
1801
+ 0.0,
1802
+ 0.0
1803
+ ],
1804
+ "q01": [
1805
+ 0.0,
1806
+ 0.0,
1807
+ 0.0,
1808
+ 0.0,
1809
+ 0.0,
1810
+ 0.0,
1811
+ 0.0
1812
+ ],
1813
+ "q99": [
1814
+ 0.0,
1815
+ 0.0,
1816
+ 0.0,
1817
+ 0.0,
1818
+ 0.0,
1819
+ 0.0,
1820
+ 0.0
1821
+ ]
1822
+ },
1823
+ "num_transitions": 34112,
1824
+ "num_trajectories": 50
1825
+ },
1826
+ "nyu_franka_play_dataset_converted_externally_to_rlds/0.1.0": {
1827
+ "action": {
1828
+ "mean": [
1829
+ 0.0010219910182058811,
1830
+ -0.00012002632865915075,
1831
+ 0.00032894135802052915,
1832
+ 0.0015034276293590665,
1833
+ -0.002198528265580535,
1834
+ -0.0016632305923849344,
1835
+ 0.7230083346366882
1836
+ ],
1837
+ "std": [
1838
+ 0.013274150900542736,
1839
+ 0.013215919025242329,
1840
+ 0.01282210648059845,
1841
+ 0.27324533462524414,
1842
+ 0.05702253058552742,
1843
+ 0.03917279839515686,
1844
+ 0.44753193855285645
1845
+ ],
1846
+ "max": [
1847
+ 0.06424188613891602,
1848
+ 0.07027634978294373,
1849
+ 0.06129661202430725,
1850
+ 6.281067848205566,
1851
+ 0.1967729926109314,
1852
+ 0.26377415657043457,
1853
+ 1.0
1854
+ ],
1855
+ "min": [
1856
+ -0.05952230095863342,
1857
+ -0.07232445478439331,
1858
+ -0.06730806827545166,
1859
+ -6.278434753417969,
1860
+ -0.21479034423828125,
1861
+ -0.3627619743347168,
1862
+ 0.0
1863
+ ],
1864
+ "q01": [
1865
+ -0.03199600875377655,
1866
+ -0.032861671447753905,
1867
+ -0.03368805110454559,
1868
+ -0.12080862045288086,
1869
+ -0.12175218224525451,
1870
+ -0.11370223641395569,
1871
+ 0.0
1872
+ ],
1873
+ "q99": [
1874
+ 0.03101520001888276,
1875
+ 0.0373908892273903,
1876
+ 0.03646374464035038,
1877
+ 0.11764093399047852,
1878
+ 0.1258920183777809,
1879
+ 0.09366151213645942,
1880
+ 1.0
1881
+ ],
1882
+ "mask": [
1883
+ true,
1884
+ true,
1885
+ true,
1886
+ true,
1887
+ true,
1888
+ true,
1889
+ false
1890
+ ]
1891
+ },
1892
+ "proprio": {
1893
+ "mean": [
1894
+ 0.0,
1895
+ 0.0,
1896
+ 0.0,
1897
+ 0.0,
1898
+ 0.0,
1899
+ 0.0,
1900
+ 0.0
1901
+ ],
1902
+ "std": [
1903
+ 0.0,
1904
+ 0.0,
1905
+ 0.0,
1906
+ 0.0,
1907
+ 0.0,
1908
+ 0.0,
1909
+ 0.0
1910
+ ],
1911
+ "max": [
1912
+ 0.0,
1913
+ 0.0,
1914
+ 0.0,
1915
+ 0.0,
1916
+ 0.0,
1917
+ 0.0,
1918
+ 0.0
1919
+ ],
1920
+ "min": [
1921
+ 0.0,
1922
+ 0.0,
1923
+ 0.0,
1924
+ 0.0,
1925
+ 0.0,
1926
+ 0.0,
1927
+ 0.0
1928
+ ],
1929
+ "q01": [
1930
+ 0.0,
1931
+ 0.0,
1932
+ 0.0,
1933
+ 0.0,
1934
+ 0.0,
1935
+ 0.0,
1936
+ 0.0
1937
+ ],
1938
+ "q99": [
1939
+ 0.0,
1940
+ 0.0,
1941
+ 0.0,
1942
+ 0.0,
1943
+ 0.0,
1944
+ 0.0,
1945
+ 0.0
1946
+ ]
1947
+ },
1948
+ "num_transitions": 44875,
1949
+ "num_trajectories": 456
1950
+ },
1951
+ "furniture_bench_dataset_converted_externally_to_rlds/0.1.0": {
1952
+ "action": {
1953
+ "mean": [
1954
+ 0.0001461071806261316,
1955
+ 0.0010830992832779884,
1956
+ 0.0006224963581189513,
1957
+ -0.0033032014034688473,
1958
+ -0.002688060747459531,
1959
+ 0.018242614343762398,
1960
+ 0.48854944109916687
1961
+ ],
1962
+ "std": [
1963
+ 0.016107233241200447,
1964
+ 0.014891570433974266,
1965
+ 0.014014236629009247,
1966
+ 0.05827433615922928,
1967
+ 0.11417083442211151,
1968
+ 0.33479660749435425,
1969
+ 0.4999157190322876
1970
+ ],
1971
+ "max": [
1972
+ 0.10000000149011612,
1973
+ 0.10000000149011612,
1974
+ 0.10000000149011612,
1975
+ 0.8651833534240723,
1976
+ 1.0909736156463623,
1977
+ 2.863185405731201,
1978
+ 1.0
1979
+ ],
1980
+ "min": [
1981
+ -0.10495579987764359,
1982
+ -0.10939455777406693,
1983
+ -0.10000000149011612,
1984
+ -0.971906840801239,
1985
+ -1.0475432872772217,
1986
+ -3.06000018119812,
1987
+ 0.0
1988
+ ],
1989
+ "q01": [
1990
+ -0.053988199681043625,
1991
+ -0.05049169331789017,
1992
+ -0.032499241530895236,
1993
+ -0.1953887003660202,
1994
+ -0.41674559473991396,
1995
+ -0.8886768388748169,
1996
+ 0.0
1997
+ ],
1998
+ "q99": [
1999
+ 0.05414841488003723,
2000
+ 0.04965164884924884,
2001
+ 0.060055799782276154,
2002
+ 0.18231668293476103,
2003
+ 0.39867786407470646,
2004
+ 0.8772023963928218,
2005
+ 1.0
2006
+ ],
2007
+ "mask": [
2008
+ true,
2009
+ true,
2010
+ true,
2011
+ true,
2012
+ true,
2013
+ true,
2014
+ false
2015
+ ]
2016
+ },
2017
+ "proprio": {
2018
+ "mean": [
2019
+ 0.0,
2020
+ 0.0,
2021
+ 0.0,
2022
+ 0.0,
2023
+ 0.0,
2024
+ 0.0,
2025
+ 0.0
2026
+ ],
2027
+ "std": [
2028
+ 0.0,
2029
+ 0.0,
2030
+ 0.0,
2031
+ 0.0,
2032
+ 0.0,
2033
+ 0.0,
2034
+ 0.0
2035
+ ],
2036
+ "max": [
2037
+ 0.0,
2038
+ 0.0,
2039
+ 0.0,
2040
+ 0.0,
2041
+ 0.0,
2042
+ 0.0,
2043
+ 0.0
2044
+ ],
2045
+ "min": [
2046
+ 0.0,
2047
+ 0.0,
2048
+ 0.0,
2049
+ 0.0,
2050
+ 0.0,
2051
+ 0.0,
2052
+ 0.0
2053
+ ],
2054
+ "q01": [
2055
+ 0.0,
2056
+ 0.0,
2057
+ 0.0,
2058
+ 0.0,
2059
+ 0.0,
2060
+ 0.0,
2061
+ 0.0
2062
+ ],
2063
+ "q99": [
2064
+ 0.0,
2065
+ 0.0,
2066
+ 0.0,
2067
+ 0.0,
2068
+ 0.0,
2069
+ 0.0,
2070
+ 0.0
2071
+ ]
2072
+ },
2073
+ "num_transitions": 3948057,
2074
+ "num_trajectories": 5100
2075
+ },
2076
+ "ucsd_kitchen_dataset_converted_externally_to_rlds/0.1.0": {
2077
+ "action": {
2078
+ "mean": [
2079
+ 410.375732421875,
2080
+ 116.9518814086914,
2081
+ 192.35031127929688,
2082
+ -121.22441864013672,
2083
+ -33.84892654418945,
2084
+ 50.016136169433594,
2085
+ 0.741813600063324
2086
+ ],
2087
+ "std": [
2088
+ 122.81488037109375,
2089
+ 108.80094909667969,
2090
+ 130.30345153808594,
2091
+ 116.2820053100586,
2092
+ 27.62191390991211,
2093
+ 41.02091979980469,
2094
+ 0.4376337230205536
2095
+ ],
2096
+ "max": [
2097
+ 678.0,
2098
+ 400.0,
2099
+ 507.0,
2100
+ 180.00001525878906,
2101
+ 6.000013828277588,
2102
+ 116.99998474121094,
2103
+ 1.0
2104
+ ],
2105
+ "min": [
2106
+ 172.0,
2107
+ -166.0,
2108
+ -99.99999237060547,
2109
+ -180.00001525878906,
2110
+ -89.0,
2111
+ -96.00010681152344,
2112
+ 0.0
2113
+ ],
2114
+ "q01": [
2115
+ 200.00001052856445,
2116
+ -102.31004211425781,
2117
+ -94.99993370056153,
2118
+ -180.00001525878906,
2119
+ -88.00001525878906,
2120
+ -38.999977111816406,
2121
+ 0.0
2122
+ ],
2123
+ "q99": [
2124
+ 637.0,
2125
+ 368.30999999999995,
2126
+ 493.0,
2127
+ 180.00001525878906,
2128
+ 0.999983012676239,
2129
+ 105.00001525878906,
2130
+ 1.0
2131
+ ],
2132
+ "mask": [
2133
+ true,
2134
+ true,
2135
+ true,
2136
+ true,
2137
+ true,
2138
+ true,
2139
+ false
2140
+ ]
2141
+ },
2142
+ "proprio": {
2143
+ "mean": [
2144
+ 0.0,
2145
+ 0.0,
2146
+ 0.0,
2147
+ 0.0,
2148
+ 0.0,
2149
+ 0.0,
2150
+ 0.0
2151
+ ],
2152
+ "std": [
2153
+ 0.0,
2154
+ 0.0,
2155
+ 0.0,
2156
+ 0.0,
2157
+ 0.0,
2158
+ 0.0,
2159
+ 0.0
2160
+ ],
2161
+ "max": [
2162
+ 0.0,
2163
+ 0.0,
2164
+ 0.0,
2165
+ 0.0,
2166
+ 0.0,
2167
+ 0.0,
2168
+ 0.0
2169
+ ],
2170
+ "min": [
2171
+ 0.0,
2172
+ 0.0,
2173
+ 0.0,
2174
+ 0.0,
2175
+ 0.0,
2176
+ 0.0,
2177
+ 0.0
2178
+ ],
2179
+ "q01": [
2180
+ 0.0,
2181
+ 0.0,
2182
+ 0.0,
2183
+ 0.0,
2184
+ 0.0,
2185
+ 0.0,
2186
+ 0.0
2187
+ ],
2188
+ "q99": [
2189
+ 0.0,
2190
+ 0.0,
2191
+ 0.0,
2192
+ 0.0,
2193
+ 0.0,
2194
+ 0.0,
2195
+ 0.0
2196
+ ]
2197
+ },
2198
+ "num_transitions": 3970,
2199
+ "num_trajectories": 150
2200
+ },
2201
+ "austin_sailor_dataset_converted_externally_to_rlds/0.1.0": {
2202
+ "action": {
2203
+ "mean": [
2204
+ 0.011825386434793472,
2205
+ 0.0064610871486365795,
2206
+ 0.060236409306526184,
2207
+ 0.0,
2208
+ 0.0,
2209
+ 0.0016465834341943264,
2210
+ 0.5260950326919556
2211
+ ],
2212
+ "std": [
2213
+ 0.46348854899406433,
2214
+ 0.41240164637565613,
2215
+ 0.41186293959617615,
2216
+ 0.0,
2217
+ 0.0,
2218
+ 0.0578608438372612,
2219
+ 0.49893733859062195
2220
+ ],
2221
+ "max": [
2222
+ 1.0,
2223
+ 1.0,
2224
+ 1.0,
2225
+ 0.0,
2226
+ 0.0,
2227
+ 0.375,
2228
+ 1.0
2229
+ ],
2230
+ "min": [
2231
+ -1.0,
2232
+ -1.0,
2233
+ -1.0,
2234
+ 0.0,
2235
+ 0.0,
2236
+ -0.375,
2237
+ 0.0
2238
+ ],
2239
+ "q01": [
2240
+ -1.0,
2241
+ -0.9828571677207947,
2242
+ -0.6000000238418579,
2243
+ 0.0,
2244
+ 0.0,
2245
+ -0.17249999940395355,
2246
+ 0.0
2247
+ ],
2248
+ "q99": [
2249
+ 1.0,
2250
+ 0.9457142949104309,
2251
+ 1.0,
2252
+ 0.0,
2253
+ 0.0,
2254
+ 0.17892856895923615,
2255
+ 1.0
2256
+ ],
2257
+ "mask": [
2258
+ true,
2259
+ true,
2260
+ true,
2261
+ true,
2262
+ true,
2263
+ true,
2264
+ false
2265
+ ]
2266
+ },
2267
+ "proprio": {
2268
+ "mean": [
2269
+ 0.0,
2270
+ 0.0,
2271
+ 0.0,
2272
+ 0.0,
2273
+ 0.0,
2274
+ 0.0,
2275
+ 0.0
2276
+ ],
2277
+ "std": [
2278
+ 0.0,
2279
+ 0.0,
2280
+ 0.0,
2281
+ 0.0,
2282
+ 0.0,
2283
+ 0.0,
2284
+ 0.0
2285
+ ],
2286
+ "max": [
2287
+ 0.0,
2288
+ 0.0,
2289
+ 0.0,
2290
+ 0.0,
2291
+ 0.0,
2292
+ 0.0,
2293
+ 0.0
2294
+ ],
2295
+ "min": [
2296
+ 0.0,
2297
+ 0.0,
2298
+ 0.0,
2299
+ 0.0,
2300
+ 0.0,
2301
+ 0.0,
2302
+ 0.0
2303
+ ],
2304
+ "q01": [
2305
+ 0.0,
2306
+ 0.0,
2307
+ 0.0,
2308
+ 0.0,
2309
+ 0.0,
2310
+ 0.0,
2311
+ 0.0
2312
+ ],
2313
+ "q99": [
2314
+ 0.0,
2315
+ 0.0,
2316
+ 0.0,
2317
+ 0.0,
2318
+ 0.0,
2319
+ 0.0,
2320
+ 0.0
2321
+ ]
2322
+ },
2323
+ "num_transitions": 353094,
2324
+ "num_trajectories": 240
2325
+ },
2326
+ "austin_sirius_dataset_converted_externally_to_rlds/0.1.0": {
2327
+ "action": {
2328
+ "mean": [
2329
+ 0.077476866543293,
2330
+ 0.031955525279045105,
2331
+ 0.04244735836982727,
2332
+ 0.0,
2333
+ 0.0,
2334
+ -0.01603454165160656,
2335
+ 0.43260180950164795
2336
+ ],
2337
+ "std": [
2338
+ 0.3906330168247223,
2339
+ 0.2998153865337372,
2340
+ 0.2782270312309265,
2341
+ 0.0,
2342
+ 0.0,
2343
+ 0.08120641857385635,
2344
+ 0.49528202414512634
2345
+ ],
2346
+ "max": [
2347
+ 1.0002285242080688,
2348
+ 0.960608720779419,
2349
+ 1.105179786682129,
2350
+ 0.0,
2351
+ 0.0,
2352
+ 0.341785728931427,
2353
+ 1.0
2354
+ ],
2355
+ "min": [
2356
+ -1.0183025598526,
2357
+ -0.9800000190734863,
2358
+ -0.9774575233459473,
2359
+ 0.0,
2360
+ 0.0,
2361
+ -0.34607142210006714,
2362
+ 0.0
2363
+ ],
2364
+ "q01": [
2365
+ -0.780905865430832,
2366
+ -0.5667179036140442,
2367
+ -0.5254343223571777,
2368
+ 0.0,
2369
+ 0.0,
2370
+ -0.28495091378688814,
2371
+ 0.0
2372
+ ],
2373
+ "q99": [
2374
+ 0.9569637751579284,
2375
+ 0.6971374487876891,
2376
+ 0.8124888157844541,
2377
+ 0.0,
2378
+ 0.0,
2379
+ 0.1971428543329239,
2380
+ 1.0
2381
+ ],
2382
+ "mask": [
2383
+ true,
2384
+ true,
2385
+ true,
2386
+ true,
2387
+ true,
2388
+ true,
2389
+ false
2390
+ ]
2391
+ },
2392
+ "proprio": {
2393
+ "mean": [
2394
+ 0.0,
2395
+ 0.0,
2396
+ 0.0,
2397
+ 0.0,
2398
+ 0.0,
2399
+ 0.0,
2400
+ 0.0
2401
+ ],
2402
+ "std": [
2403
+ 0.0,
2404
+ 0.0,
2405
+ 0.0,
2406
+ 0.0,
2407
+ 0.0,
2408
+ 0.0,
2409
+ 0.0
2410
+ ],
2411
+ "max": [
2412
+ 0.0,
2413
+ 0.0,
2414
+ 0.0,
2415
+ 0.0,
2416
+ 0.0,
2417
+ 0.0,
2418
+ 0.0
2419
+ ],
2420
+ "min": [
2421
+ 0.0,
2422
+ 0.0,
2423
+ 0.0,
2424
+ 0.0,
2425
+ 0.0,
2426
+ 0.0,
2427
+ 0.0
2428
+ ],
2429
+ "q01": [
2430
+ 0.0,
2431
+ 0.0,
2432
+ 0.0,
2433
+ 0.0,
2434
+ 0.0,
2435
+ 0.0,
2436
+ 0.0
2437
+ ],
2438
+ "q99": [
2439
+ 0.0,
2440
+ 0.0,
2441
+ 0.0,
2442
+ 0.0,
2443
+ 0.0,
2444
+ 0.0,
2445
+ 0.0
2446
+ ]
2447
+ },
2448
+ "num_transitions": 279939,
2449
+ "num_trajectories": 559
2450
+ },
2451
+ "dlr_edan_shared_control_converted_externally_to_rlds/0.1.0": {
2452
+ "action": {
2453
+ "mean": [
2454
+ 0.0066478196531534195,
2455
+ -0.0007657355745323002,
2456
+ 0.006522845011204481,
2457
+ 0.0011679773451760411,
2458
+ -0.006395624950528145,
2459
+ -0.011903021484613419,
2460
+ 0.6985887289047241
2461
+ ],
2462
+ "std": [
2463
+ 0.021393585950136185,
2464
+ 0.018142299726605415,
2465
+ 0.03374377265572548,
2466
+ 0.01743541844189167,
2467
+ 0.03394372761249542,
2468
+ 0.04641878604888916,
2469
+ 0.45885783433914185
2470
+ ],
2471
+ "max": [
2472
+ 0.18991442024707794,
2473
+ 0.0739002525806427,
2474
+ 0.18064819276332855,
2475
+ 0.0866486132144928,
2476
+ 0.13464981317520142,
2477
+ 0.16910280287265778,
2478
+ 1.0
2479
+ ],
2480
+ "min": [
2481
+ -0.10054297000169754,
2482
+ -0.08427435159683228,
2483
+ -0.13533438742160797,
2484
+ -0.17556548118591309,
2485
+ -0.18485672771930695,
2486
+ -0.2680685818195343,
2487
+ 0.0
2488
+ ],
2489
+ "q01": [
2490
+ -0.02987122368067503,
2491
+ -0.06013262912631035,
2492
+ -0.08286409199237824,
2493
+ -0.05924444157630205,
2494
+ -0.15986866518855095,
2495
+ -0.15636983573436739,
2496
+ 0.0
2497
+ ],
2498
+ "q99": [
2499
+ 0.08832092039287087,
2500
+ 0.042126184627413736,
2501
+ 0.11311905644834042,
2502
+ 0.0643695573508739,
2503
+ 0.03941855944693088,
2504
+ 0.156646853685379,
2505
+ 1.0
2506
+ ],
2507
+ "mask": [
2508
+ true,
2509
+ true,
2510
+ true,
2511
+ true,
2512
+ true,
2513
+ true,
2514
+ false
2515
+ ]
2516
+ },
2517
+ "proprio": {
2518
+ "mean": [
2519
+ 0.0,
2520
+ 0.0,
2521
+ 0.0,
2522
+ 0.0,
2523
+ 0.0,
2524
+ 0.0,
2525
+ 0.0
2526
+ ],
2527
+ "std": [
2528
+ 0.0,
2529
+ 0.0,
2530
+ 0.0,
2531
+ 0.0,
2532
+ 0.0,
2533
+ 0.0,
2534
+ 0.0
2535
+ ],
2536
+ "max": [
2537
+ 0.0,
2538
+ 0.0,
2539
+ 0.0,
2540
+ 0.0,
2541
+ 0.0,
2542
+ 0.0,
2543
+ 0.0
2544
+ ],
2545
+ "min": [
2546
+ 0.0,
2547
+ 0.0,
2548
+ 0.0,
2549
+ 0.0,
2550
+ 0.0,
2551
+ 0.0,
2552
+ 0.0
2553
+ ],
2554
+ "q01": [
2555
+ 0.0,
2556
+ 0.0,
2557
+ 0.0,
2558
+ 0.0,
2559
+ 0.0,
2560
+ 0.0,
2561
+ 0.0
2562
+ ],
2563
+ "q99": [
2564
+ 0.0,
2565
+ 0.0,
2566
+ 0.0,
2567
+ 0.0,
2568
+ 0.0,
2569
+ 0.0,
2570
+ 0.0
2571
+ ]
2572
+ },
2573
+ "num_transitions": 8928,
2574
+ "num_trajectories": 104
2575
+ },
2576
+ "iamlab_cmu_pickup_insert_converted_externally_to_rlds/0.1.0": {
2577
+ "action": {
2578
+ "mean": [
2579
+ 0.5274373292922974,
2580
+ 0.028582017868757248,
2581
+ 0.18712472915649414,
2582
+ 1.2339569330215454,
2583
+ 0.03226622939109802,
2584
+ -1.4199472665786743,
2585
+ 0.5550631880760193
2586
+ ],
2587
+ "std": [
2588
+ 0.08108346909284592,
2589
+ 0.1116756722331047,
2590
+ 0.07747555524110794,
2591
+ 2.8737244606018066,
2592
+ 0.02774704433977604,
2593
+ 2.7678685188293457,
2594
+ 0.4969509243965149
2595
+ ],
2596
+ "max": [
2597
+ 0.6634981632232666,
2598
+ 0.23428471386432648,
2599
+ 0.4308285415172577,
2600
+ 3.1415927410125732,
2601
+ 0.13647015392780304,
2602
+ 3.141592502593994,
2603
+ 1.0
2604
+ ],
2605
+ "min": [
2606
+ 0.3071657121181488,
2607
+ -0.29754969477653503,
2608
+ 0.06578229367733002,
2609
+ -3.1415927410125732,
2610
+ -0.04584203287959099,
2611
+ -3.141592502593994,
2612
+ 0.0
2613
+ ],
2614
+ "q01": [
2615
+ 0.3148897051811218,
2616
+ -0.20317550599575043,
2617
+ 0.06785467118024827,
2618
+ -3.140952730178833,
2619
+ -0.029743434861302376,
2620
+ -3.141091251373291,
2621
+ 0.0
2622
+ ],
2623
+ "q99": [
2624
+ 0.6472805738449097,
2625
+ 0.20846802592277527,
2626
+ 0.36855655312538155,
2627
+ 3.1409926891326903,
2628
+ 0.11424950212240226,
2629
+ 3.1410969257354737,
2630
+ 1.0
2631
+ ],
2632
+ "mask": [
2633
+ true,
2634
+ true,
2635
+ true,
2636
+ true,
2637
+ true,
2638
+ true,
2639
+ false
2640
+ ]
2641
+ },
2642
+ "proprio": {
2643
+ "mean": [
2644
+ 0.0,
2645
+ 0.0,
2646
+ 0.0,
2647
+ 0.0,
2648
+ 0.0,
2649
+ 0.0,
2650
+ 0.0
2651
+ ],
2652
+ "std": [
2653
+ 0.0,
2654
+ 0.0,
2655
+ 0.0,
2656
+ 0.0,
2657
+ 0.0,
2658
+ 0.0,
2659
+ 0.0
2660
+ ],
2661
+ "max": [
2662
+ 0.0,
2663
+ 0.0,
2664
+ 0.0,
2665
+ 0.0,
2666
+ 0.0,
2667
+ 0.0,
2668
+ 0.0
2669
+ ],
2670
+ "min": [
2671
+ 0.0,
2672
+ 0.0,
2673
+ 0.0,
2674
+ 0.0,
2675
+ 0.0,
2676
+ 0.0,
2677
+ 0.0
2678
+ ],
2679
+ "q01": [
2680
+ 0.0,
2681
+ 0.0,
2682
+ 0.0,
2683
+ 0.0,
2684
+ 0.0,
2685
+ 0.0,
2686
+ 0.0
2687
+ ],
2688
+ "q99": [
2689
+ 0.0,
2690
+ 0.0,
2691
+ 0.0,
2692
+ 0.0,
2693
+ 0.0,
2694
+ 0.0,
2695
+ 0.0
2696
+ ]
2697
+ },
2698
+ "num_transitions": 146241,
2699
+ "num_trajectories": 631
2700
+ },
2701
+ "utaustin_mutex/0.1.0": {
2702
+ "action": {
2703
+ "mean": [
2704
+ 0.06176406517624855,
2705
+ -0.005005490034818649,
2706
+ 0.10216782987117767,
2707
+ -0.03314131125807762,
2708
+ 0.013895022682845592,
2709
+ -0.011317633092403412,
2710
+ 0.5038976669311523
2711
+ ],
2712
+ "std": [
2713
+ 0.187501460313797,
2714
+ 0.4468473196029663,
2715
+ 0.3792876601219177,
2716
+ 0.14097853004932404,
2717
+ 0.06453699618577957,
2718
+ 0.11765265464782715,
2719
+ 0.501045286655426
2720
+ ],
2721
+ "max": [
2722
+ 1.0,
2723
+ 1.0,
2724
+ 1.0,
2725
+ 0.375,
2726
+ 0.375,
2727
+ 0.375,
2728
+ 1.0
2729
+ ],
2730
+ "min": [
2731
+ -1.0,
2732
+ -1.0,
2733
+ -1.0,
2734
+ -0.375,
2735
+ -0.375,
2736
+ -0.375,
2737
+ 0.0
2738
+ ],
2739
+ "q01": [
2740
+ -0.4285714328289032,
2741
+ -0.9800000190734863,
2742
+ -0.5571428537368774,
2743
+ -0.375,
2744
+ -0.15642857551574707,
2745
+ -0.335357129573822,
2746
+ 0.0
2747
+ ],
2748
+ "q99": [
2749
+ 0.5914285778999329,
2750
+ 0.9714285731315613,
2751
+ 1.0,
2752
+ 0.3278571367263794,
2753
+ 0.207857146859169,
2754
+ 0.25607141852378845,
2755
+ 1.0
2756
+ ],
2757
+ "mask": [
2758
+ true,
2759
+ true,
2760
+ true,
2761
+ true,
2762
+ true,
2763
+ true,
2764
+ false
2765
+ ]
2766
+ },
2767
+ "proprio": {
2768
+ "mean": [
2769
+ 0.0,
2770
+ 0.0,
2771
+ 0.0,
2772
+ 0.0,
2773
+ 0.0,
2774
+ 0.0,
2775
+ 0.0
2776
+ ],
2777
+ "std": [
2778
+ 0.0,
2779
+ 0.0,
2780
+ 0.0,
2781
+ 0.0,
2782
+ 0.0,
2783
+ 0.0,
2784
+ 0.0
2785
+ ],
2786
+ "max": [
2787
+ 0.0,
2788
+ 0.0,
2789
+ 0.0,
2790
+ 0.0,
2791
+ 0.0,
2792
+ 0.0,
2793
+ 0.0
2794
+ ],
2795
+ "min": [
2796
+ 0.0,
2797
+ 0.0,
2798
+ 0.0,
2799
+ 0.0,
2800
+ 0.0,
2801
+ 0.0,
2802
+ 0.0
2803
+ ],
2804
+ "q01": [
2805
+ 0.0,
2806
+ 0.0,
2807
+ 0.0,
2808
+ 0.0,
2809
+ 0.0,
2810
+ 0.0,
2811
+ 0.0
2812
+ ],
2813
+ "q99": [
2814
+ 0.0,
2815
+ 0.0,
2816
+ 0.0,
2817
+ 0.0,
2818
+ 0.0,
2819
+ 0.0,
2820
+ 0.0
2821
+ ]
2822
+ },
2823
+ "num_transitions": 361883,
2824
+ "num_trajectories": 1500
2825
+ },
2826
+ "berkeley_fanuc_manipulation/0.1.0": {
2827
+ "action": {
2828
+ "mean": [
2829
+ 0.0007744057802483439,
2830
+ -0.00031240080716088414,
2831
+ -0.0015001941937953234,
2832
+ -0.0007515158504247665,
2833
+ -0.00015832878125365824,
2834
+ 0.00014327642566058785,
2835
+ 0.699295699596405
2836
+ ],
2837
+ "std": [
2838
+ 0.0034070133697241545,
2839
+ 0.00499219074845314,
2840
+ 0.005344326142221689,
2841
+ 0.007599010597914457,
2842
+ 0.004081932827830315,
2843
+ 0.008568963967263699,
2844
+ 0.45868709683418274
2845
+ ],
2846
+ "max": [
2847
+ 0.009999999776482582,
2848
+ 0.009999999776482582,
2849
+ 0.009999999776482582,
2850
+ 0.03490658476948738,
2851
+ 0.03490658476948738,
2852
+ 0.03490658476948738,
2853
+ 1.0
2854
+ ],
2855
+ "min": [
2856
+ -0.009999999776482582,
2857
+ -0.009999999776482582,
2858
+ -0.009999999776482582,
2859
+ -0.03490658476948738,
2860
+ -0.03490658476948738,
2861
+ -0.03490658476948738,
2862
+ 0.0
2863
+ ],
2864
+ "q01": [
2865
+ -0.009999999776482582,
2866
+ -0.009999999776482582,
2867
+ -0.009999999776482582,
2868
+ -0.03490658476948738,
2869
+ 0.0,
2870
+ -0.03490658476948738,
2871
+ 0.0
2872
+ ],
2873
+ "q99": [
2874
+ 0.009999999776482582,
2875
+ 0.009999999776482582,
2876
+ 0.009999999776482582,
2877
+ 0.03490658476948738,
2878
+ 0.0,
2879
+ 0.03490658476948738,
2880
+ 1.0
2881
+ ],
2882
+ "mask": [
2883
+ true,
2884
+ true,
2885
+ true,
2886
+ true,
2887
+ true,
2888
+ true,
2889
+ false
2890
+ ]
2891
+ },
2892
+ "proprio": {
2893
+ "mean": [
2894
+ 0.0,
2895
+ 0.0,
2896
+ 0.0,
2897
+ 0.0,
2898
+ 0.0,
2899
+ 0.0,
2900
+ 0.0
2901
+ ],
2902
+ "std": [
2903
+ 0.0,
2904
+ 0.0,
2905
+ 0.0,
2906
+ 0.0,
2907
+ 0.0,
2908
+ 0.0,
2909
+ 0.0
2910
+ ],
2911
+ "max": [
2912
+ 0.0,
2913
+ 0.0,
2914
+ 0.0,
2915
+ 0.0,
2916
+ 0.0,
2917
+ 0.0,
2918
+ 0.0
2919
+ ],
2920
+ "min": [
2921
+ 0.0,
2922
+ 0.0,
2923
+ 0.0,
2924
+ 0.0,
2925
+ 0.0,
2926
+ 0.0,
2927
+ 0.0
2928
+ ],
2929
+ "q01": [
2930
+ 0.0,
2931
+ 0.0,
2932
+ 0.0,
2933
+ 0.0,
2934
+ 0.0,
2935
+ 0.0,
2936
+ 0.0
2937
+ ],
2938
+ "q99": [
2939
+ 0.0,
2940
+ 0.0,
2941
+ 0.0,
2942
+ 0.0,
2943
+ 0.0,
2944
+ 0.0,
2945
+ 0.0
2946
+ ]
2947
+ },
2948
+ "num_transitions": 62613,
2949
+ "num_trajectories": 415
2950
+ },
2951
+ "cmu_stretch/0.1.0": {
2952
+ "action": {
2953
+ "mean": [
2954
+ 0.0003630445571616292,
2955
+ 0.0,
2956
+ 0.0016466928645968437,
2957
+ 0.0,
2958
+ 0.0,
2959
+ 0.0,
2960
+ 0.3987048268318176
2961
+ ],
2962
+ "std": [
2963
+ 0.004081855062395334,
2964
+ 0.0,
2965
+ 0.003774340031668544,
2966
+ 0.0,
2967
+ 0.0,
2968
+ 0.0,
2969
+ 0.489638090133667
2970
+ ],
2971
+ "max": [
2972
+ 0.02338407188653946,
2973
+ 0.0,
2974
+ 0.023404927924275398,
2975
+ 0.0,
2976
+ 0.0,
2977
+ 0.0,
2978
+ 1.0
2979
+ ],
2980
+ "min": [
2981
+ -0.019353797659277916,
2982
+ 0.0,
2983
+ -0.02019215188920498,
2984
+ 0.0,
2985
+ 0.0,
2986
+ 0.0,
2987
+ 0.0
2988
+ ],
2989
+ "q01": [
2990
+ -0.011175686959177256,
2991
+ 0.0,
2992
+ -0.0032206363626755773,
2993
+ 0.0,
2994
+ 0.0,
2995
+ 0.0,
2996
+ 0.0
2997
+ ],
2998
+ "q99": [
2999
+ 0.014501785952597848,
3000
+ 0.0,
3001
+ 0.015056106168776728,
3002
+ 0.0,
3003
+ 0.0,
3004
+ 0.0,
3005
+ 1.0
3006
+ ],
3007
+ "mask": [
3008
+ true,
3009
+ true,
3010
+ true,
3011
+ true,
3012
+ true,
3013
+ true,
3014
+ false
3015
+ ]
3016
+ },
3017
+ "proprio": {
3018
+ "mean": [
3019
+ 0.0,
3020
+ 0.0,
3021
+ 0.0,
3022
+ 0.0,
3023
+ 0.0,
3024
+ 0.0,
3025
+ 0.0
3026
+ ],
3027
+ "std": [
3028
+ 0.0,
3029
+ 0.0,
3030
+ 0.0,
3031
+ 0.0,
3032
+ 0.0,
3033
+ 0.0,
3034
+ 0.0
3035
+ ],
3036
+ "max": [
3037
+ 0.0,
3038
+ 0.0,
3039
+ 0.0,
3040
+ 0.0,
3041
+ 0.0,
3042
+ 0.0,
3043
+ 0.0
3044
+ ],
3045
+ "min": [
3046
+ 0.0,
3047
+ 0.0,
3048
+ 0.0,
3049
+ 0.0,
3050
+ 0.0,
3051
+ 0.0,
3052
+ 0.0
3053
+ ],
3054
+ "q01": [
3055
+ 0.0,
3056
+ 0.0,
3057
+ 0.0,
3058
+ 0.0,
3059
+ 0.0,
3060
+ 0.0,
3061
+ 0.0
3062
+ ],
3063
+ "q99": [
3064
+ 0.0,
3065
+ 0.0,
3066
+ 0.0,
3067
+ 0.0,
3068
+ 0.0,
3069
+ 0.0,
3070
+ 0.0
3071
+ ]
3072
+ },
3073
+ "num_transitions": 25016,
3074
+ "num_trajectories": 135
3075
+ },
3076
+ "bc_z/0.1.0": {
3077
+ "action": {
3078
+ "mean": [
3079
+ -0.009958645328879356,
3080
+ 0.0008958434336818755,
3081
+ 0.00499522453173995,
3082
+ 0.000297540333122015,
3083
+ -0.008734511211514473,
3084
+ -0.03068969026207924,
3085
+ 0.8344562649726868
3086
+ ],
3087
+ "std": [
3088
+ 0.030533093959093094,
3089
+ 0.0231416504830122,
3090
+ 0.020642085000872612,
3091
+ 0.04156165570020676,
3092
+ 0.04643021523952484,
3093
+ 0.07697845250368118,
3094
+ 0.36111101508140564
3095
+ ],
3096
+ "max": [
3097
+ 0.2165454924106598,
3098
+ 0.1251407265663147,
3099
+ 0.10772687941789627,
3100
+ 0.33544227480888367,
3101
+ 0.28117990493774414,
3102
+ 0.40614867210388184,
3103
+ 1.0
3104
+ ],
3105
+ "min": [
3106
+ -0.1677047461271286,
3107
+ -0.14630407094955444,
3108
+ -0.10066790133714676,
3109
+ -0.29421567916870117,
3110
+ -0.32101404666900635,
3111
+ -0.4635624885559082,
3112
+ 0.0
3113
+ ],
3114
+ "q01": [
3115
+ -0.09220654994249344,
3116
+ -0.06456145539879798,
3117
+ -0.049121275544166565,
3118
+ -0.11594625547528267,
3119
+ -0.14152548640966414,
3120
+ -0.2251061636209488,
3121
+ 0.0
3122
+ ],
3123
+ "q99": [
3124
+ 0.07628866866230968,
3125
+ 0.058019736707210584,
3126
+ 0.052540797740221024,
3127
+ 0.11740604028105736,
3128
+ 0.11703975558280955,
3129
+ 0.16729306846857078,
3130
+ 1.0
3131
+ ],
3132
+ "mask": [
3133
+ true,
3134
+ true,
3135
+ true,
3136
+ true,
3137
+ true,
3138
+ true,
3139
+ false
3140
+ ]
3141
+ },
3142
+ "proprio": {
3143
+ "mean": [
3144
+ 0.0,
3145
+ 0.0,
3146
+ 0.0,
3147
+ 0.0,
3148
+ 0.0,
3149
+ 0.0,
3150
+ 0.0
3151
+ ],
3152
+ "std": [
3153
+ 0.0,
3154
+ 0.0,
3155
+ 0.0,
3156
+ 0.0,
3157
+ 0.0,
3158
+ 0.0,
3159
+ 0.0
3160
+ ],
3161
+ "max": [
3162
+ 0.0,
3163
+ 0.0,
3164
+ 0.0,
3165
+ 0.0,
3166
+ 0.0,
3167
+ 0.0,
3168
+ 0.0
3169
+ ],
3170
+ "min": [
3171
+ 0.0,
3172
+ 0.0,
3173
+ 0.0,
3174
+ 0.0,
3175
+ 0.0,
3176
+ 0.0,
3177
+ 0.0
3178
+ ],
3179
+ "q01": [
3180
+ 0.0,
3181
+ 0.0,
3182
+ 0.0,
3183
+ 0.0,
3184
+ 0.0,
3185
+ 0.0,
3186
+ 0.0
3187
+ ],
3188
+ "q99": [
3189
+ 0.0,
3190
+ 0.0,
3191
+ 0.0,
3192
+ 0.0,
3193
+ 0.0,
3194
+ 0.0,
3195
+ 0.0
3196
+ ]
3197
+ },
3198
+ "num_transitions": 6015535,
3199
+ "num_trajectories": 43264
3200
+ },
3201
+ "fmb_dataset/1.0.0": {
3202
+ "action": {
3203
+ "mean": [
3204
+ 0.05902976542711258,
3205
+ -0.06476633995771408,
3206
+ -0.09787469357252121,
3207
+ 0.004325387068092823,
3208
+ 0.00028963759541511536,
3209
+ -0.04457257315516472,
3210
+ 0.7336440086364746
3211
+ ],
3212
+ "std": [
3213
+ 0.28809186816215515,
3214
+ 0.2820416986942291,
3215
+ 0.4626740515232086,
3216
+ 0.3266514539718628,
3217
+ 0.10842999070882797,
3218
+ 0.34400978684425354,
3219
+ 0.4435289800167084
3220
+ ],
3221
+ "max": [
3222
+ 1.399999976158142,
3223
+ 1.0,
3224
+ 1.399999976158142,
3225
+ 1.0,
3226
+ 1.0,
3227
+ 1.0,
3228
+ 1.0
3229
+ ],
3230
+ "min": [
3231
+ -1.399999976158142,
3232
+ -1.399999976158142,
3233
+ -1.0,
3234
+ -1.0,
3235
+ -1.0,
3236
+ -1.0,
3237
+ 0.0
3238
+ ],
3239
+ "q01": [
3240
+ -0.8257142901420593,
3241
+ -1.399999976158142,
3242
+ -1.0,
3243
+ -1.0,
3244
+ -0.3028571307659149,
3245
+ -1.0,
3246
+ 0.0
3247
+ ],
3248
+ "q99": [
3249
+ 1.0,
3250
+ 0.5257142782211304,
3251
+ 1.0,
3252
+ 1.0,
3253
+ 0.3400000035762787,
3254
+ 1.0,
3255
+ 1.0
3256
+ ],
3257
+ "mask": [
3258
+ true,
3259
+ true,
3260
+ true,
3261
+ true,
3262
+ true,
3263
+ true,
3264
+ false
3265
+ ]
3266
+ },
3267
+ "proprio": {
3268
+ "mean": [
3269
+ 0.0,
3270
+ 0.0,
3271
+ 0.0,
3272
+ 0.0,
3273
+ 0.0,
3274
+ 0.0,
3275
+ 0.0
3276
+ ],
3277
+ "std": [
3278
+ 0.0,
3279
+ 0.0,
3280
+ 0.0,
3281
+ 0.0,
3282
+ 0.0,
3283
+ 0.0,
3284
+ 0.0
3285
+ ],
3286
+ "max": [
3287
+ 0.0,
3288
+ 0.0,
3289
+ 0.0,
3290
+ 0.0,
3291
+ 0.0,
3292
+ 0.0,
3293
+ 0.0
3294
+ ],
3295
+ "min": [
3296
+ 0.0,
3297
+ 0.0,
3298
+ 0.0,
3299
+ 0.0,
3300
+ 0.0,
3301
+ 0.0,
3302
+ 0.0
3303
+ ],
3304
+ "q01": [
3305
+ 0.0,
3306
+ 0.0,
3307
+ 0.0,
3308
+ 0.0,
3309
+ 0.0,
3310
+ 0.0,
3311
+ 0.0
3312
+ ],
3313
+ "q99": [
3314
+ 0.0,
3315
+ 0.0,
3316
+ 0.0,
3317
+ 0.0,
3318
+ 0.0,
3319
+ 0.0,
3320
+ 0.0
3321
+ ]
3322
+ },
3323
+ "num_transitions": 1137459,
3324
+ "num_trajectories": 8612
3325
+ },
3326
+ "dobbe/0.0.1": {
3327
+ "action": {
3328
+ "mean": [
3329
+ -0.00011206958151888102,
3330
+ 0.0011229681549593806,
3331
+ -0.00010193959315074608,
3332
+ -7.37128357286565e-05,
3333
+ -0.0006753374473191798,
3334
+ -5.664441778208129e-05,
3335
+ 0.6318688988685608
3336
+ ],
3337
+ "std": [
3338
+ 0.042660679668188095,
3339
+ 0.04428431764245033,
3340
+ 0.12224890291690826,
3341
+ 0.005388470832258463,
3342
+ 0.011246936395764351,
3343
+ 0.006288259290158749,
3344
+ 0.3973240256309509
3345
+ ],
3346
+ "max": [
3347
+ 38.590423583984375,
3348
+ 17.932697296142578,
3349
+ 4.843764305114746,
3350
+ 1.4372116327285767,
3351
+ 0.4340403974056244,
3352
+ 1.2057193517684937,
3353
+ 0.9998947381973267
3354
+ ],
3355
+ "min": [
3356
+ -5.700923442840576,
3357
+ -21.605947494506836,
3358
+ -123.72489929199219,
3359
+ -1.7229845523834229,
3360
+ -0.4998578727245331,
3361
+ -0.8867913484573364,
3362
+ 1.4196479014572105e-06
3363
+ ],
3364
+ "q01": [
3365
+ -0.01119564864784479,
3366
+ -0.014266146533191203,
3367
+ -0.0071747214533388615,
3368
+ -0.009444301575422287,
3369
+ -0.03990109823644161,
3370
+ -0.017422311007976532,
3371
+ 4.003279136668425e-05
3372
+ ],
3373
+ "q99": [
3374
+ 0.01015154086053368,
3375
+ 0.017181577533483497,
3376
+ 0.007216989761218411,
3377
+ 0.010380979906767595,
3378
+ 0.03556173853576176,
3379
+ 0.018032474815845446,
3380
+ 0.9982578039169312
3381
+ ],
3382
+ "mask": [
3383
+ true,
3384
+ true,
3385
+ true,
3386
+ true,
3387
+ true,
3388
+ true,
3389
+ false
3390
+ ]
3391
+ },
3392
+ "proprio": {
3393
+ "mean": [
3394
+ 0.0,
3395
+ 0.0,
3396
+ 0.0,
3397
+ 0.0,
3398
+ 0.0,
3399
+ 0.0,
3400
+ 0.0
3401
+ ],
3402
+ "std": [
3403
+ 0.0,
3404
+ 0.0,
3405
+ 0.0,
3406
+ 0.0,
3407
+ 0.0,
3408
+ 0.0,
3409
+ 0.0
3410
+ ],
3411
+ "max": [
3412
+ 0.0,
3413
+ 0.0,
3414
+ 0.0,
3415
+ 0.0,
3416
+ 0.0,
3417
+ 0.0,
3418
+ 0.0
3419
+ ],
3420
+ "min": [
3421
+ 0.0,
3422
+ 0.0,
3423
+ 0.0,
3424
+ 0.0,
3425
+ 0.0,
3426
+ 0.0,
3427
+ 0.0
3428
+ ],
3429
+ "q01": [
3430
+ 0.0,
3431
+ 0.0,
3432
+ 0.0,
3433
+ 0.0,
3434
+ 0.0,
3435
+ 0.0,
3436
+ 0.0
3437
+ ],
3438
+ "q99": [
3439
+ 0.0,
3440
+ 0.0,
3441
+ 0.0,
3442
+ 0.0,
3443
+ 0.0,
3444
+ 0.0,
3445
+ 0.0
3446
+ ]
3447
+ },
3448
+ "num_transitions": 1139911,
3449
+ "num_trajectories": 5208
3450
+ },
3451
+ "droid/1.0.0": {
3452
+ "action": {
3453
+ "mean": [
3454
+ 0.027425529435276985,
3455
+ -0.0026820411439985037,
3456
+ 0.01595238223671913,
3457
+ 0.0035501928068697453,
3458
+ -0.030532635748386383,
3459
+ -0.006685464642941952,
3460
+ 0.5860344171524048
3461
+ ],
3462
+ "std": [
3463
+ 0.25387412309646606,
3464
+ 0.18426834046840668,
3465
+ 0.22532416880130768,
3466
+ 0.21757009625434875,
3467
+ 0.22572560608386993,
3468
+ 0.2867794930934906,
3469
+ 0.4287726879119873
3470
+ ],
3471
+ "max": [
3472
+ 0.9999998211860657,
3473
+ 0.999991774559021,
3474
+ 0.9999973177909851,
3475
+ 0.9999874830245972,
3476
+ 0.9999954104423523,
3477
+ 0.9999998807907104,
3478
+ 1.0
3479
+ ],
3480
+ "min": [
3481
+ -0.9999999403953552,
3482
+ -0.9999951124191284,
3483
+ -0.9999960660934448,
3484
+ -0.9999980330467224,
3485
+ -0.9999982118606567,
3486
+ -0.9999998807907104,
3487
+ 0.0
3488
+ ],
3489
+ "q01": [
3490
+ -0.7776297926902771,
3491
+ -0.5803514122962952,
3492
+ -0.5795090794563293,
3493
+ -0.6464047729969025,
3494
+ -0.7041108310222626,
3495
+ -0.8895104378461838,
3496
+ 0.0
3497
+ ],
3498
+ "q99": [
3499
+ 0.7597932070493698,
3500
+ 0.5726242214441299,
3501
+ 0.7351000607013702,
3502
+ 0.6705610305070877,
3503
+ 0.6464948207139969,
3504
+ 0.8897542208433151,
3505
+ 1.0
3506
+ ],
3507
+ "mask": [
3508
+ true,
3509
+ true,
3510
+ true,
3511
+ true,
3512
+ true,
3513
+ true,
3514
+ false
3515
+ ]
3516
+ },
3517
+ "proprio": {
3518
+ "mean": [
3519
+ 0.0,
3520
+ 0.0,
3521
+ 0.0,
3522
+ 0.0,
3523
+ 0.0,
3524
+ 0.0,
3525
+ 0.0
3526
+ ],
3527
+ "std": [
3528
+ 0.0,
3529
+ 0.0,
3530
+ 0.0,
3531
+ 0.0,
3532
+ 0.0,
3533
+ 0.0,
3534
+ 0.0
3535
+ ],
3536
+ "max": [
3537
+ 0.0,
3538
+ 0.0,
3539
+ 0.0,
3540
+ 0.0,
3541
+ 0.0,
3542
+ 0.0,
3543
+ 0.0
3544
+ ],
3545
+ "min": [
3546
+ 0.0,
3547
+ 0.0,
3548
+ 0.0,
3549
+ 0.0,
3550
+ 0.0,
3551
+ 0.0,
3552
+ 0.0
3553
+ ],
3554
+ "q01": [
3555
+ 0.0,
3556
+ 0.0,
3557
+ 0.0,
3558
+ 0.0,
3559
+ 0.0,
3560
+ 0.0,
3561
+ 0.0
3562
+ ],
3563
+ "q99": [
3564
+ 0.0,
3565
+ 0.0,
3566
+ 0.0,
3567
+ 0.0,
3568
+ 0.0,
3569
+ 0.0,
3570
+ 0.0
3571
+ ]
3572
+ },
3573
+ "num_transitions": 27044326,
3574
+ "num_trajectories": 92233
3575
+ },
3576
+ "rh20t_rlds/1.0.0": {
3577
+ "action": {
3578
+ "mean": [
3579
+ -5.332157638779582e+28,
3580
+ -1.5128827327837974e+29,
3581
+ -1.832736619079747e+28,
3582
+ 0.5735913515090942,
3583
+ -0.00847744569182396,
3584
+ -0.5566052198410034,
3585
+ 0.3186892569065094
3586
+ ],
3587
+ "std": [
3588
+ Infinity,
3589
+ Infinity,
3590
+ Infinity,
3591
+ 2.2581026554107666,
3592
+ 0.1548534482717514,
3593
+ 2.2581026554107666,
3594
+ 0.39917993545532227
3595
+ ],
3596
+ "max": [
3597
+ 7.582831568163597e+35,
3598
+ 7.557172735451728e+35,
3599
+ 2.2717764477020827e+27,
3600
+ 3.1415927410125732,
3601
+ 1.5116956233978271,
3602
+ 3.1415927410125732,
3603
+ 1.0
3604
+ ],
3605
+ "min": [
3606
+ -3.5543094244408723e+36,
3607
+ -8.723098019507117e+36,
3608
+ -9.648338287048974e+35,
3609
+ -3.1415927410125732,
3610
+ -1.5062522888183594,
3611
+ -3.1415927410125732,
3612
+ 0.0
3613
+ ],
3614
+ "q01": [
3615
+ 0.36028257966041566,
3616
+ -0.272584410905838,
3617
+ 0.005985925104469062,
3618
+ -3.1411514282226562,
3619
+ -0.5925320792198181,
3620
+ -3.1415159702301025,
3621
+ 0.0
3622
+ ],
3623
+ "q99": [
3624
+ 0.7534684538841248,
3625
+ 0.31738221645355225,
3626
+ 0.33061375379562374,
3627
+ 3.141425132751465,
3628
+ 0.47507260441780086,
3629
+ 3.141479730606079,
3630
+ 1.0
3631
+ ],
3632
+ "mask": [
3633
+ true,
3634
+ true,
3635
+ true,
3636
+ true,
3637
+ true,
3638
+ true,
3639
+ false
3640
+ ]
3641
+ },
3642
+ "proprio": {
3643
+ "mean": [
3644
+ 0.0,
3645
+ 0.0,
3646
+ 0.0,
3647
+ 0.0,
3648
+ 0.0,
3649
+ 0.0,
3650
+ 0.0
3651
+ ],
3652
+ "std": [
3653
+ 0.0,
3654
+ 0.0,
3655
+ 0.0,
3656
+ 0.0,
3657
+ 0.0,
3658
+ 0.0,
3659
+ 0.0
3660
+ ],
3661
+ "max": [
3662
+ 0.0,
3663
+ 0.0,
3664
+ 0.0,
3665
+ 0.0,
3666
+ 0.0,
3667
+ 0.0,
3668
+ 0.0
3669
+ ],
3670
+ "min": [
3671
+ 0.0,
3672
+ 0.0,
3673
+ 0.0,
3674
+ 0.0,
3675
+ 0.0,
3676
+ 0.0,
3677
+ 0.0
3678
+ ],
3679
+ "q01": [
3680
+ 0.0,
3681
+ 0.0,
3682
+ 0.0,
3683
+ 0.0,
3684
+ 0.0,
3685
+ 0.0,
3686
+ 0.0
3687
+ ],
3688
+ "q99": [
3689
+ 0.0,
3690
+ 0.0,
3691
+ 0.0,
3692
+ 0.0,
3693
+ 0.0,
3694
+ 0.0,
3695
+ 0.0
3696
+ ]
3697
+ },
3698
+ "num_transitions": 52644433,
3699
+ "num_trajectories": 104392
3700
+ }
3701
+ }
3702
+ }
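These per-dataset statistics are what the `unnorm_key` argument selects at inference time (see `test_huggingface.py` below): actions predicted in the normalized range [-1, 1] are mapped back to each dataset's scale with the `q01`/`q99` quantiles, and dimensions whose `mask` entry is false (the gripper) are passed through unchanged. A minimal sketch of that mapping, assuming this convention; `unnormalize_action` is an illustrative helper, not an API of this repository:

import numpy as np

def unnormalize_action(norm_action, stats):
    # Affine map from [-1, 1] back to the [q01, q99] range of the chosen dataset;
    # dimensions with mask == false (e.g. the gripper) are left untouched.
    norm_action = np.asarray(norm_action)
    q01 = np.asarray(stats["action"]["q01"])
    q99 = np.asarray(stats["action"]["q99"])
    mask = np.asarray(stats["action"]["mask"])
    rescaled = 0.5 * (norm_action + 1.0) * (q99 - q01) + q01
    return np.where(mask, rescaled, norm_action)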
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<image>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
test_huggingface.py ADDED
@@ -0,0 +1,35 @@
+ import argparse
+
+ import torch
+ from PIL import Image
+ from transformers import AutoModel, AutoProcessor
+
+ parser = argparse.ArgumentParser("Huggingface AutoModel Testing")
+ parser.add_argument("--model_name_or_path", default="", help="pretrained model name or path.")
+ parser.add_argument("--num_images", type=int, default=1, help="num_images for testing.")
+ args = parser.parse_args()
+
+ if __name__ == "__main__":
+     # Load the processor (exposes the per-dataset normalization statistics) and the model.
+     processor = AutoProcessor.from_pretrained(args.model_name_or_path, trust_remote_code=True)
+     print(processor.statistics)
+
+     model = AutoModel.from_pretrained(args.model_name_or_path, trust_remote_code=True, torch_dtype=torch.bfloat16).eval().cuda()
+
+     # Build a batch from an example image and a language instruction.
+     image = Image.open("example.png").convert("RGB")
+     images = [image] * args.num_images
+     prompt = "What action should the robot take to pick the cpu?"
+     inputs = processor(images=images, text=prompt, unnorm_key="bridge_orig/1.0.0", return_tensors="pt")
+     print(inputs)
+
+     # Predict discrete action tokens, then decode them back into continuous actions.
+     generation_outputs = model.predict_action(inputs)
+     print(generation_outputs, processor.batch_decode(generation_outputs))
+
+     actions = processor.decode_actions(generation_outputs, unnorm_key="bridge_orig/1.0.0")
+     print(actions)
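The script expects an `example.png` in the working directory and a CUDA device; a typical invocation (the path is a placeholder for a local checkout of this repository) is `python test_huggingface.py --model_name_or_path /path/to/this/repo --num_images 1`.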
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2523a63c898ebf0a32c7282a2e459ef2c950a846c5f3172305089e4149b6b6c3
+ size 36157680
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff