import torch
import numpy as np

class replay_buffer(object):
    """A simple FIFO experience replay buffer for off-policy RL agents.

    Transitions (obs, action, nextobs, reward, done) are held in
    pre-allocated float32 numpy arrays; once capacity is reached the
    oldest entries are overwritten in circular-buffer fashion.
    """

    def __init__(self, obs_dim, action_dim, size) -> None:
        """Pre-allocate storage for ``size`` transitions.

        Args:
            obs_dim: dimensionality of a flat observation vector.
            action_dim: dimensionality of a flat action vector.
            size: maximum number of transitions held before overwriting.
        """
        self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.action_buf = np.zeros([size, action_dim], dtype=np.float32)
        self.nextobs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.reward_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        # ptr: next write slot; size: current fill level; maxsize: capacity.
        self.ptr, self.size, self.maxsize = 0, 0, size

    def __len__(self):
        """Return the number of transitions currently stored."""
        return self.size

    def store(self, obs, action, nextobs, reward, done):
        """Insert one transition, overwriting the oldest entry when full."""
        self.obs_buf[self.ptr] = obs
        self.action_buf[self.ptr] = action
        self.nextobs_buf[self.ptr] = nextobs
        self.reward_buf[self.ptr] = reward
        self.done_buf[self.ptr] = done
        # Advance the write pointer circularly; fill level saturates at capacity.
        self.ptr = (self.ptr + 1) % self.maxsize
        self.size = min(self.size + 1, self.maxsize)

    def sample_batch(self, batch_size = 64, idxs = None):
        """Sample transitions uniformly at random, with replacement.

        Args:
            batch_size: number of transitions to draw when ``idxs`` is None.
            idxs: optional explicit indices to fetch instead of sampling
                (useful e.g. for prioritized replay updates).

        Returns:
            dict with keys ``obs``, ``action``, ``nextobs``, ``reward``,
            ``done`` and ``idxs`` (the indices actually used).

        Raises:
            ValueError: if the buffer is empty and ``idxs`` is None.
        """
        if idxs is None:
            # Without this guard np.random.randint(0, 0) raises an opaque
            # "low >= high" error; make the failure mode explicit instead.
            if self.size == 0:
                raise ValueError("cannot sample from an empty replay buffer")
            idxs = np.random.randint(0, self.size, size = batch_size)
        return dict(
            obs = self.obs_buf[idxs],
            action = self.action_buf[idxs],
            nextobs = self.nextobs_buf[idxs],
            reward = self.reward_buf[idxs],
            done = self.done_buf[idxs],
            idxs = idxs
        )

    

