File size: 4,835 Bytes
ad07a47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import d4rl.gym_mujoco
import gym
import gymnasium
import minari
import numpy as np


def get_tuple_from_minari_dataset(dataset_name):
    """Flatten a Minari dataset into step-level arrays plus per-episode lengths.

    Returns a 7-tuple of numpy arrays: observations, actions, rewards,
    next_observations, terminations, truncations, and the per-trajectory
    step counts.
    """
    ds = minari.load_dataset(dataset_name)
    field_order = ('observations', 'actions', 'rewards', 'next_observations',
                   'terminations', 'truncations')
    collected = {key: [] for key in field_order}
    lengths = []
    for episode in ds:
        # observations has one extra entry (the final state): the first
        # len-1 entries are the step observations, the last len-1 are the
        # corresponding next-observations.
        collected['observations'].append(episode.observations[:-1])
        collected['actions'].append(episode.actions)
        collected['rewards'].append(episode.rewards)
        collected['next_observations'].append(episode.observations[1:])
        collected['terminations'].append(episode.terminations)
        collected['truncations'].append(episode.truncations)
        lengths.append(len(episode.rewards))
        # Every stored episode must end with an explicit terminal/truncation flag.
        assert (episode.truncations[-1] or episode.terminations[-1])
    flattened = tuple(np.concatenate(collected[key]) for key in field_order)
    return flattened + (np.array(lengths),)


def step_tuple_to_traj_tuple(obs, act, rew, next_obs, term, trunc):
    """Regroup flat step-level arrays into a list of per-trajectory dicts.

    Each returned dict has keys 'observations', 'actions', 'rewards',
    'terminations', 'truncations'.  'observations' holds one more entry than
    the other arrays: the trajectory's final next-observation is appended so
    the full state sequence is recoverable without a separate key.

    All arrays must share the same leading (step) dimension; observations may
    be of any rank (vectors, images, ...).
    """
    # Episode boundaries; the very last step is excluded so np.split never
    # produces a trailing empty chunk.
    dones = np.logical_or(term, trunc)[:-1]
    dones_ind = np.where(dones)[0] + 1
    obs, act, rew, next_obs, term, trunc = \
        map(lambda x: np.split(x, dones_ind), [obs, act, rew, next_obs, term, trunc])

    # _next_obs[-1:] keeps a leading axis of length 1 for any observation
    # rank (the old reshape(1, -1) assumed 2-D observations).
    obs_new = [np.concatenate([_obs, _next_obs[-1:]])
               for _obs, _next_obs in zip(obs, next_obs)]
    buffer = []
    keys = ['observations', 'actions', 'rewards', 'terminations', 'truncations']
    for _traj_dt in zip(obs_new, act, rew, term, trunc):
        _buff_i = dict(zip(keys, _traj_dt))
        buffer.append(_buff_i)
    return buffer


def make_traj_based_buffer(d4rl_env_name):
    """Load a D4RL dataset and regroup its flat step arrays per trajectory.

    Returns (buffer, env) where buffer is a list of per-trajectory dicts and
    env is the gym environment (kept so callers can read its metadata).
    """
    env = gym.make(d4rl_env_name)
    data = env.get_dataset()
    buffer = step_tuple_to_traj_tuple(
        data['observations'],
        data['actions'],
        data['rewards'],
        data['next_observations'],
        data['terminals'],
        data['timeouts'],
    )
    return buffer, env


def create_standard_d4rl():
    """Convert the standard D4RL MuJoCo datasets into Minari datasets."""
    env_prefixes = ['Hopper', 'HalfCheetah', 'Ant', 'Walker2d']
    qualities = ['expert', 'medium', 'random', 'medium-expert']

    for prefix in env_prefixes:
        for quality in qualities:
            env_name = f'{prefix.lower()}-{quality}-v2'

            buffer, env = make_traj_based_buffer(env_name)
            # The final trajectory may be cut off mid-episode; force-mark it
            # truncated so every stored episode ends with a done flag.
            last_traj = buffer[-1]
            if not (last_traj["terminations"][-1] or last_traj["truncations"][-1]):
                last_traj["truncations"][-1] = True

            gymnasium_env = gymnasium.make(f'{prefix}-v2')
            minari.create_dataset_from_buffers(
                dataset_id=env_name,
                env=gymnasium_env,
                buffer=buffer,
                algorithm_name='SAC',
                author='Zhiyuan',
                # minari_version=f"{minari.__version__}",
                author_email='levi.huzhiyuan@gmail.com',
                code_permalink='TODO',
                ref_min_score=env.ref_min_score,
                ref_max_score=env.ref_max_score,
            )
            print('dataset created')
    return


def validate_standard_d4rl():
    """Check that each converted Minari dataset matches its D4RL original."""
    env_prefixes = ['Hopper', 'HalfCheetah', 'Ant', 'Walker2d']
    qualities = ['expert', 'medium', 'random', 'medium-expert']

    for prefix in env_prefixes:
        for quality in qualities:
            env_name = f'{prefix.lower()}-{quality}-v2'

            (m_obs, m_act, m_rew, m_next_obs,
             m_term, m_trunc, m_traj_len) = get_tuple_from_minari_dataset(env_name)

            d4rl_data = gym.make(f'{prefix.lower()}-{quality}-v2').get_dataset()
            checks = [(m_act, "actions"),
                      (m_obs, "observations"),
                      (m_next_obs, "next_observations"),
                      (m_rew, "rewards"),
                      (m_term, "terminals")]
            for minari_arr, d4rl_key in checks:
                assert np.all(minari_arr == d4rl_data[d4rl_key])
            # The last truncation flag was force-set during conversion, so
            # compare all timeouts except the final one.
            assert np.all(m_trunc[:-1] == d4rl_data["timeouts"][:-1])
            assert m_trunc[-1]

            # Rebuild D4RL trajectory lengths from done flags; the final step
            # always closes an episode, so it is appended explicitly.
            episode_ends = np.logical_or(
                d4rl_data["terminals"], d4rl_data["timeouts"])[:-1]
            end_indices = np.where(episode_ends)[0]
            num_steps = len(d4rl_data["rewards"])
            boundaries = np.concatenate([[-1], end_indices, [num_steps - 1]])
            d4rl_traj_length = boundaries[1:] - boundaries[:-1]
            assert np.all(d4rl_traj_length == m_traj_len)
            assert np.sum(m_traj_len) == len(m_rew)
            print('validation passed')
    return





if __name__ == '__main__':
    # Guarded entry point: importing this module must not trigger the full
    # dataset conversion. Build the Minari datasets first, then verify them
    # against the original D4RL data.
    create_standard_d4rl()
    validate_standard_d4rl()