im-Kitsch committed
Commit
ad07a47
1 Parent(s): 2289851

Create transfer.py

Files changed (1)
  1. transfer.py +126 -0
transfer.py ADDED
@@ -0,0 +1,126 @@
+ import d4rl.gym_mujoco
+ import gym
+ import gymnasium
+ import minari
+ import numpy as np
+
+
+ def get_tuple_from_minari_dataset(dataset_name):
+     dt = minari.load_dataset(dataset_name)
+     observations, actions, rewards, next_observations, terminations, truncations = \
+         [], [], [], [], [], []
+     traj_length = []
+     for _ep in dt:
+         observations.append(_ep.observations[:-1])  # Minari stores T+1 observations per episode
+         actions.append(_ep.actions)
+         rewards.append(_ep.rewards)
+         next_observations.append(_ep.observations[1:])
+         terminations.append(_ep.terminations)
+         truncations.append(_ep.truncations)
+         traj_length.append(len(_ep.rewards))
+         assert (_ep.truncations[-1] or _ep.terminations[-1])
+     observations, actions, rewards, next_observations, terminations, truncations = \
+         map(np.concatenate, [observations, actions, rewards, next_observations, terminations, truncations])
+     traj_length = np.array(traj_length)
+     return observations, actions, rewards, next_observations, terminations, truncations, traj_length
+
+
+ def step_tuple_to_traj_tuple(obs, act, rew, next_obs, term, trunc):
+     dones = np.logical_or(term, trunc)[:-1]  # the last flag must not be used for the split, to avoid an empty chunk
+     dones_ind = np.where(dones)[0] + 1
+     obs, act, rew, next_obs, term, trunc = \
+         map(lambda x: np.split(x, dones_ind), [obs, act, rew, next_obs, term, trunc])
+
+     obs_new = [np.concatenate([_obs, _next_obs[-1].reshape(1, -1)])  # append the final next_obs: T+1 observations
+                for _obs, _next_obs in zip(obs, next_obs)]
+     buffer = []
+     keys = ['observations', 'actions', 'rewards', 'terminations', 'truncations']
+     for _traj_dt in zip(obs_new, act, rew, term, trunc):
+         _buff_i = dict(zip(keys, _traj_dt))
+         buffer.append(_buff_i)
+     return buffer
+
+
+ def make_traj_based_buffer(d4rl_env_name):
+     env = gym.make(d4rl_env_name)
+     dt = env.get_dataset()
+     obs = dt['observations']
+     next_obs = dt['next_observations']
+     rewards = dt['rewards']
+     actions = dt['actions']
+     terminations = dt['terminals']
+     truncations = dt['timeouts']
+
+     buffer = step_tuple_to_traj_tuple(obs, actions, rewards, next_obs, terminations, truncations)
+
+     return buffer, env
+
+
+ def create_standard_d4rl():
+
+     mujoco_envs = ['Hopper', 'HalfCheetah', 'Ant', 'Walker2d']
+     quality_lists = ['expert', 'medium', 'random', 'medium-expert']
+
+     for _env_prefix in mujoco_envs:
+         for _quality in quality_lists:
+             env_name = f'{_env_prefix.lower()}-{_quality}-v2'
+
+             buffer, env = make_traj_based_buffer(env_name)
+             if not (buffer[-1]["terminations"][-1] or buffer[-1]["truncations"][-1]):
+                 buffer[-1]["truncations"][-1] = True  # D4RL's final step may end mid-episode; mark it truncated
+
+             gymnasium_env = gymnasium.make(f'{_env_prefix}-v2')
+             dataset = minari.create_dataset_from_buffers(
+                 dataset_id=env_name,
+                 env=gymnasium_env,
+                 buffer=buffer,
+                 algorithm_name='SAC',
+                 author='Zhiyuan',
+                 # minari_version=f"{minari.__version__}",
+                 author_email='levi.huzhiyuan@gmail.com',
+                 code_permalink='TODO',
+                 ref_min_score=env.ref_min_score,
+                 ref_max_score=env.ref_max_score,
+             )
+             print('dataset created')
+     return
+
+
+ def validate_standard_d4rl():
+     mujoco_envs = ['Hopper', 'HalfCheetah', 'Ant', 'Walker2d']
+     quality_lists = ['expert', 'medium', 'random', 'medium-expert']
+
+     for _env_prefix in mujoco_envs:
+         for _quality in quality_lists:
+             env_name = f'{_env_prefix.lower()}-{_quality}-v2'
+
+             minari_tuple = get_tuple_from_minari_dataset(env_name)
+             m_obs, m_act, m_rew, m_next_obs, m_term, m_trunc, m_traj_len = minari_tuple
+
+             d4rl_data = gym.make(f'{_env_prefix.lower()}-{_quality}-v2').get_dataset()
+             assert np.all(m_act == d4rl_data["actions"])
+             assert np.all(m_obs == d4rl_data["observations"])
+             assert np.all(m_next_obs == d4rl_data["next_observations"])
+             assert np.all(m_rew == d4rl_data["rewards"])
+             assert np.all(m_term == d4rl_data["terminals"])
+             assert np.all(m_trunc[:-1] == d4rl_data["timeouts"][:-1])
+             assert m_trunc[-1]
+
+             d4rl_dones = np.logical_or(d4rl_data["terminals"], d4rl_data["timeouts"])[:-1]
+             # the final step is always treated as an episode boundary, so it is excluded here
+
+             d4rl_dones = np.where(d4rl_dones)[0]
+             num_d4rl = len(d4rl_data["rewards"])
+             d4rl_dones = np.concatenate([[-1], d4rl_dones, [num_d4rl - 1]])
+             d4rl_traj_length = d4rl_dones[1:] - d4rl_dones[:-1]
+             assert np.all(d4rl_traj_length == m_traj_len)
+             assert np.sum(m_traj_len) == len(m_rew)
+             print('validation passed')
+     return
+
+
+
+
+
+ create_standard_d4rl()
+ validate_standard_d4rl()
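
Once the script has run, the converted data can be read back through the standard Minari loader. A minimal sketch, assuming create_standard_d4rl() has already been executed and the 'hopper-medium-v2' dataset exists locally:

import minari
import numpy as np

# Load one of the converted datasets by its id (hypothetical example id).
dataset = minari.load_dataset('hopper-medium-v2')

returns = []
for episode in dataset:
    # Each episode carries T+1 observations and T actions/rewards/terminations/truncations,
    # matching the layout built by step_tuple_to_traj_tuple above.
    assert len(episode.observations) == len(episode.actions) + 1
    returns.append(np.sum(episode.rewards))

print(f'{len(returns)} episodes, mean return {np.mean(returns):.2f}')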