from collections import namedtuple
import numpy as np
import torch


class OnPolicyReplayBuffer:
    """Trajectory buffer for on-policy algorithms (e.g. A2C/PPO).

    ``sample`` returns *every* stored transition in insertion order as
    batched tensors; the caller is expected to ``reset`` after each
    policy update rather than sampling minibatches with replacement.
    """

    # One shared record type for all buffers. The original built a new
    # namedtuple class on every __init__ call, which is pure overhead.
    Transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'mask', 'done'])

    def __init__(self, device):
        """
        Args:
            device: torch device spec (e.g. ``'cpu'``, ``'cuda:0'``) that
                sampled tensors are moved to.
        """
        self.memory_pool = []
        # Kept as an instance attribute so existing callers that use
        # ``buffer.transition(...)`` keep working unchanged.
        self.transition = self.Transition
        self.device = torch.device(device)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory_pool)

    def store(self, state, action, reward, next_state, mask, done):
        """Append a single transition to the buffer."""
        self.memory_pool.append(self.transition(state, action, reward, next_state, mask, done))

    def _stack(self, values, dtype=None):
        """Row-stack one field across transitions and move it to the device.

        ``dtype`` casts when given; actions pass ``None`` so they keep
        their natural dtype (e.g. int64 for discrete actions), matching
        the original behavior. ``as_tensor`` avoids the extra copy that
        ``torch.tensor`` would make of the fresh vstack result.
        """
        tensor = torch.as_tensor(np.vstack(values)).to(self.device)
        return tensor if dtype is None else tensor.to(dtype)

    def sample(self):
        """Return all stored transitions as batched tensors.

        Returns:
            Tuple ``(states, actions, rewards, next_states, masks, dones)``,
            each with leading dimension ``len(self)``. All are float32
            except ``actions``, which keeps its stored dtype.

        Raises:
            ValueError: if the buffer is empty (``np.vstack`` on no rows).
        """
        # Transpose the list of transitions into per-field sequences in a
        # single pass instead of six list comprehensions over the pool.
        fields = self.transition(*zip(*self.memory_pool))
        states = self._stack(fields.state, torch.float32)
        actions = self._stack(fields.action)
        rewards = self._stack(fields.reward, torch.float32)
        next_states = self._stack(fields.next_state, torch.float32)
        masks = self._stack(fields.mask, torch.float32)
        dones = self._stack(fields.done, torch.float32)
        return states, actions, rewards, next_states, masks, dones

    def reset(self):
        """Discard all stored transitions."""
        self.memory_pool = []