import collections
import torch
import random

class ReplayBuffer():
    """Fixed-capacity uniform experience replay buffer for graph-state RL.

    Each stored transition is a 5-tuple ``(s, a, r, s_prime, done_mask)``
    where the states ``s`` / ``s_prime`` are themselves 5-tuples of torch
    tensors: ``(adj, node_f, candi, candi_w, mask)``.
    """

    def __init__(self, buffer_limit=50000):
        """Create an empty buffer holding at most ``buffer_limit`` transitions."""
        # deque(maxlen=...) silently evicts the oldest transition at capacity
        self.buffer = collections.deque(maxlen=buffer_limit)

    def put(self, transition):
        """Append one ``(s, a, r, s_prime, done_mask)`` tuple to the buffer."""
        self.buffer.append(transition)

    def sample(self, n):
        """Sample ``n`` transitions uniformly without replacement.

        Returns a list ``[state_batch, actions, rewards, next_state_batch,
        done_masks]`` where actions / rewards / done_masks are float tensors
        of shape ``(n, 1)`` and the state batches are lists of stacked
        tensors (see :meth:`state_batch`).

        Raises ValueError (from ``random.sample``) if ``n`` exceeds the
        current buffer size.
        """
        mini_batch = random.sample(self.buffer, n)
        # zip(*...) transposes the n sampled 5-tuples into 5 parallel sequences
        s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = zip(*mini_batch)

        # NOTE(review): torch.Tensor always yields float32, so integer
        # actions come back as floats — presumably callers cast to long
        # before gather(); kept as-is to preserve existing behavior.
        return [self.state_batch(s_lst),
                torch.Tensor([[a] for a in a_lst]),
                torch.Tensor([[r] for r in r_lst]),
                self.state_batch(s_prime_lst),
                torch.Tensor([[d] for d in done_mask_lst])]

    def size(self):
        """Return the number of transitions currently stored."""
        return len(self.buffer)

    def state_batch(self, s_lst):
        """Collate a sequence of per-state tensor tuples into batched tensors.

        ``s_lst`` is a sequence of ``(adj, node_f, candi, candi_w, mask)``
        tuples; each component is stacked along a new leading batch
        dimension. All states must share component shapes — a torch.stack
        requirement — so this assumes fixed-size graphs per batch.
        """
        adj, node_f, candi, candi_w, mask = zip(*s_lst)
        return [torch.stack(adj, dim=0),
                torch.stack(node_f, dim=0),
                torch.stack(candi, dim=0),
                torch.stack(candi_w, dim=0),
                torch.stack(mask, dim=0)]
