#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py

import random
import collections
import numpy as np

# One stored transition: state, action, reward, next state, terminal flag.
Exp = collections.namedtuple('Exp', 's a r s_ t')

class ReplayMemory(object):
    """Fixed-size FIFO buffer of experience tuples for off-policy RL.

    Holds transitions with fields (s, a, r, s_, t); once ``max_size``
    entries are stored, each new append evicts the oldest transition.
    """

    def __init__(self, max_size):
        # deque(maxlen=...) gives O(1) append with automatic eviction
        # of the oldest element when the buffer is full.
        self.buffer = collections.deque(maxlen=max_size)

    def append(self, exp):
        """Store one transition (drops the oldest entry when full)."""
        self.buffer.append(exp)

    def sample(self, batch_size):
        """Draw ``batch_size`` distinct transitions uniformly at random.

        Returns:
            tuple: ``(states, actions, rewards, next_states, dones)``.
            ``states``/``next_states``/``dones`` are numpy arrays,
            ``rewards`` is a float32 numpy array, and ``actions`` is
            kept as a plain Python list (callers index it directly).

        Raises:
            ValueError: if ``batch_size`` exceeds the number of stored
                transitions (propagated from ``random.sample``).
        """
        minibatch = random.sample(self.buffer, batch_size)

        state_batch = [data.s for data in minibatch]
        action_batch = [data.a for data in minibatch]
        reward_batch = [data.r for data in minibatch]
        next_state_batch = [data.s_ for data in minibatch]
        done_batch = [data.t for data in minibatch]

        return np.array(state_batch), \
            action_batch, np.array(reward_batch).astype('float32'), \
            np.array(next_state_batch), np.array(done_batch)

    def __len__(self):
        """Return the number of transitions currently stored."""
        return len(self.buffer)
