# encoding:UTF-8
"""Code from https://github.com/tambetm/simple_dqn/blob/master/src/replay_memory.py"""

import random
import numpy as np

class ReplayBuffer(object):
    """Circular (ring-buffer) experience replay memory for DQN.

    Stores transitions of the form (state, action, reward, next_state,
    terminal) in pre-allocated NumPy arrays and serves uniformly random
    mini-batches via :meth:`sample`.
    """

    def __init__(self, config):
        """Allocate storage.

        config: dict with keys 'replay_buffer_size' (pool capacity) and
        'batch_size' (mini-batch size used by both add() and sample()).
        """
        self.buffer_size = config['replay_buffer_size']  # maximum capacity of the pool
        self.batch_size = config['batch_size']           # mini-batch size
        self.current = 0   # write pointer: index where the next transition is stored
        self.count = 0     # number of valid (already written) entries, <= buffer_size

        # Experience pool, one row per transition: (s, a, r, s+1, terminal).
        # NOTE(review): the state dimension 4 is hard-coded (looks like
        # CartPole-style observations) — confirm against the caller.
        self.actions = np.empty((self.buffer_size, 1), dtype=np.uint8)
        self.rewards = np.empty((self.buffer_size, 1), dtype=np.int8)
        self.states = np.empty((self.buffer_size, 4), dtype=np.float32)
        self.nextstates = np.empty((self.buffer_size, 4), dtype=np.float32)
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement spelling.
        self.terminals = np.empty((self.buffer_size, 1), dtype=bool)

    def add(self, *args):
        """Insert transitions into the ring buffer.

        args = (states, actions, rewards, nextstates, terminals).
        Either a single transition (state is 1-D) or a whole mini-batch of
        ``batch_size`` rows (state is 2-D). The ndim check disambiguates a
        1-D state whose length happens to equal batch_size.
        """
        state = np.asarray(args[0])
        if state.ndim == 2 and state.shape[0] == self.batch_size:
            # Batch insert; `current` marks the next write position.
            if self.current + self.batch_size <= self.buffer_size:
                # The whole batch fits before the end of the pool.
                end = self.current + self.batch_size
                self.states[self.current:end, ...] = args[0]
                self.actions[self.current:end, ...] = args[1]
                self.rewards[self.current:end, ...] = args[2]
                self.nextstates[self.current:end, ...] = args[3]
                self.terminals[self.current:end, ...] = args[4]
            else:
                # The batch crosses the end of the buffer: split it in two.
                border_1 = self.buffer_size - self.current  # rows that fit at the tail
                border_2 = self.batch_size - border_1       # rows wrapped to the front
                # First fill up to the end of the pool ...
                self.states[self.current:, ...] = args[0][:border_1, ...]
                self.actions[self.current:, ...] = args[1][:border_1, ...]
                self.rewards[self.current:, ...] = args[2][:border_1, ...]
                self.nextstates[self.current:, ...] = args[3][:border_1, ...]
                self.terminals[self.current:, ...] = args[4][:border_1, ...]
                # ... then wrap around and fill the remainder from the front.
                self.states[:border_2, ...] = args[0][border_1:, ...]
                self.actions[:border_2, ...] = args[1][border_1:, ...]
                self.rewards[:border_2, ...] = args[2][border_1:, ...]
                self.nextstates[:border_2, ...] = args[3][border_1:, ...]
                self.terminals[:border_2, ...] = args[4][border_1:, ...]
            # Modulo arithmetic makes the buffer circular.
            self.current = (self.current + self.batch_size) % self.buffer_size
            self.count = min(self.count + self.batch_size, self.buffer_size)
        else:
            # Single-transition insert.
            self.states[self.current, ...] = args[0]
            self.actions[self.current, ...] = args[1]
            self.rewards[self.current, ...] = args[2]
            self.nextstates[self.current, ...] = args[3]
            self.terminals[self.current, ...] = args[4]
            self.current = (self.current + 1) % self.buffer_size
            self.count = min(self.count + 1, self.buffer_size)

    def sample(self):
        """Return a random mini-batch [states, actions, rewards, nextstates, terminals].

        Only indexes that have actually been written are sampled (the old
        code drew from the whole np.empty pool and could return
        uninitialized garbage before the buffer filled). Sampling is
        without replacement once enough entries exist. np.random.choice
        also avoids the former np.int16 index dtype, which overflowed for
        buffer_size > 32767.
        """
        population = self.count if self.count > 0 else self.buffer_size
        indexes = np.random.choice(
            population, self.batch_size, replace=population < self.batch_size
        )
        return [
            self.states[indexes],
            self.actions[indexes],
            self.rewards[indexes],
            self.nextstates[indexes],
            self.terminals[indexes],
        ]

