from collections import deque, namedtuple
import numpy as np
import random
import torch


class ReplayBuffer:
    """Fixed-size uniform-sampling replay buffer for off-policy RL.

    Stores (state, action, reward, next_state, mask) transitions and
    returns uniformly sampled minibatches as float32 tensors on the
    configured device.
    """

    def __init__(self, hyper, device):
        """
        Args:
            hyper: dict with keys 'buffer_size' (max transitions kept)
                and 'batch_size' (minibatch size returned by sample()).
            device: torch device spec (e.g. 'cpu', 'cuda:0') that sampled
                tensors are moved to.
        """
        # deque with maxlen silently evicts the oldest transition once full
        self.memory_pool = deque(maxlen=hyper['buffer_size'])
        self.batch_size = hyper['batch_size']
        self.transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'mask'])
        self.device = torch.device(device)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory_pool)

    def store(self, state, action, reward, next_state, mask):
        """Append one transition; the oldest entry is dropped when full."""
        transition = self.transition(state, action, reward, next_state, mask)
        self.memory_pool.append(transition)

    def _stack(self, values):
        """Row-stack per-transition values into a float32 tensor on self.device.

        np.vstack builds the batch once; from_numpy wraps it without the
        extra copy torch.tensor() would make (astype with copy=False only
        copies when a dtype conversion is actually needed).
        """
        return torch.from_numpy(np.vstack(values).astype(np.float32, copy=False)).to(self.device)

    def sample(self):
        """Draw a uniform random minibatch of self.batch_size transitions.

        Returns:
            Tuple (states, actions, rewards, next_states, masks), each a
            float32 tensor with leading dimension batch_size, on self.device.

        Raises:
            ValueError: from random.sample, if fewer than batch_size
                transitions are currently stored.
        """
        transitions = random.sample(self.memory_pool, k=self.batch_size)
        # Transpose the list of transitions into per-field value lists.
        batch = self.transition(*zip(*transitions))
        return (
            self._stack(batch.state),
            self._stack(batch.action),
            self._stack(batch.reward),
            self._stack(batch.next_state),
            self._stack(batch.mask),
        )
